diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..b033c11 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,2 @@ +src/munchkin/tests/files* linguist-vendored +src/munchkin/tests/qsharp* linguist-vendored \ No newline at end of file diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000..5f0a139 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1 @@ +* @jfriel-oqc @keriksson-rosenqvist @owen-oqc @hamidelmaazouz @chemix-lunacy \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..a0942d3 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,24 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: bug +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what happened. + +**Describe what you expected to happen** +A clear and concise description of what you expected to happen but didn't. + +**Add supporting information** +Provide as much information as possible that will allow us to help you, including but not limited to: + +1. OS/library version. +2. Screenshots of an error or stack trace if you have one. +3. Input file used, or a snippet that causes the issue if you cannot share the full file. +4. The precise configuration options you used to run QAT. + +It is highly recommended you give _at least_ the above information, as this will help us help you. \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..fec6189 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,18 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: enhancement +assignees: '' + +--- + +**Summarize your feature/enhancement** + +Give a descriptive summary of the feature/enhancement you would like to see. Try to be as precise as possible so the team can gauge whether it's something they would like to see. + +**Specific details of what you would like to see** + +Describe the feature/enhancement in as much detail as you can, including supporting papers or external links that may act as better design references. + +The more information the better.
diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml new file mode 100644 index 0000000..183a1e1 --- /dev/null +++ b/.github/workflows/build.yaml @@ -0,0 +1,26 @@ +name: Build + +on: + pull_request: + branches: [main, develop] + workflow_dispatch: + +permissions: + contents: write + actions: write + pull-requests: write + +run-name: Build from ${{ github.ref }} + +jobs: + unit-tests: + name: Unit Tests + runs-on: [ubuntu-latest] + + steps: + - uses: actions/checkout@v3 + + - name: Set up Python 3.9 + uses: actions/setup-python@v3 + with: + python-version: "3.9" diff --git a/.github/workflows/cla-assistant.yaml b/.github/workflows/cla-assistant.yaml new file mode 100644 index 0000000..126336b --- /dev/null +++ b/.github/workflows/cla-assistant.yaml @@ -0,0 +1,26 @@ +on: + issue_comment: + types: [created] + pull_request_target: + types: [opened,closed,synchronize] + +name: "CLA bot" + +jobs: + cla-acknowledgement: + runs-on: ubuntu-latest + name: "Check that the CLA has been acknowledged" + steps: + - name: "CLA Assistant" + if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the Contributor License Agreement and I hereby accept the Terms.') || github.event_name == 'pull_request_target' + uses: cla-assistant/github-action@v2.3.0 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PERSONAL_ACCESS_TOKEN : ${{ secrets.REPO_BOT_ACCESS_TOKEN }} + with: + branch: 'bot/data' + path-to-signatures: 'cla.json' + custom-allsigned-prcomment: 'All Contributors have signed the CLA.' + custom-pr-sign-comment: 'I have read the Contributor License Agreement and I hereby accept the Terms.' + allowlist: bot* + path-to-document: 'https://github.com/oqc-community/munchkin/blob/develop/contributor_license_agreement.md' diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..ab004fc --- /dev/null +++ b/.gitignore @@ -0,0 +1,165 @@ +# Generated by Cargo +# will have compiled files and executables +target/ + +# Remove Cargo.lock from gitignore if creating an executable, leave it for libraries +# More information here https://doc.rust-lang.org/cargo/guide/cargo-toml-vs-cargo-lock.html +Cargo.lock + +# These are backup files generated by rustfmt +**/*.rs.bk + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ +.docs-venv + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# Other +/.cargo/config.toml +wheelhouse/ + +/.vscode/settings.json + +.local/ +.env/ +.vscode/ +.idea/ +.cargo/ +.vs/ +bin/ +obj/ \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..0a635df --- /dev/null +++ b/LICENSE @@ -0,0 +1,13 @@ +BSD 3-Clause License + +Copyright 2023 © Oxford Quantum Circuits Ltd + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL OXFORD QUANTUM CIRCUITS, ITS SUBSIDIARIES, OR AFFILIATES, BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/README.md b/README.md new file mode 100644 index 0000000..4d654a0 --- /dev/null +++ b/README.md @@ -0,0 +1,33 @@ +**Munchkin** is a symbolic execution quantum-classical hybrid runtime which consumes hybrid IRs such as QIR, runs +them while performing dynamic optimizations, then calls into a provided QPU backend to run the synthesized +circuits. + +It provides hybrid computation capabilities to QPUs/tools that only have a circuit-level API available, +as well as providing a platform for dynamic optimization/lowering algorithms. + +**Note: Munchkin is still heavily work-in-progress even though its features are already useful. We'd love to hear what sort of features and ideas you think would fit in!** + +### Quick-start + +1. Install Munchkin in your favourite Python venv by running `pip install munchqin`. +2. Read the [examples](https://github.com/oqc-community/munchkin/blob/develop/examples.md). +3. (Optional) Read the [draft paper](https://github.com/oqc-community/munchkin/blob/develop/docs/Munchkin%20Draft%20v2.pdf) for a deep-dive into Munchkin's concepts and data structures. + +### Contributing + +If you'd like to contribute, your first destination will be to [build the system locally](https://github.com/oqc-community/munchkin/blob/develop/building.md). +There's a [getting started](https://github.com/oqc-community/munchkin/blob/develop/development.md) page that covers some of the most important bits you'd need to know about the project before jumping into writing code. + +After that, feel free to fork the project and put up PRs with any work you would like to add. All experimental work that isn't ready for prime time has to be disabled by default and must have no impact on the existing runtime or features while disabled. + +We appreciate any work people would like to add, but we may not accept all PRs. If you really want to add something but aren't sure it'll fit, please just raise an issue as a feature request. +We'll review it and either give the green light or recommend changes, potentially even advising a secondary tool that would fit better. + +Thanks for making Munchkin better than it was! + +We also have a [code of conduct](https://github.com/oqc-community/munchkin/blob/develop/code_of_conduct.md) that we expect everyone to adhere to. + +### Licence + +The code in this repository is licensed under the BSD 3-Clause Licence. +Please see [LICENSE](https://github.com/oqc-community/munchkin/blob/develop/LICENSE) for more information. diff --git a/building.md b/building.md new file mode 100644 index 0000000..c4f7199 --- /dev/null +++ b/building.md @@ -0,0 +1,34 @@ +## Building from Source + +Prerequisites: + +1. LLVM. Our build scripts can download a binary, or you can [build it yourself](https://llvm.org/docs/GettingStarted.html#getting-the-source-code-and-building-llvm). +2. [Python 3.9](https://www.python.org/downloads/). +3. [Rust](https://www.rust-lang.org/tools/install). +4. [Powershell 7](https://learn.microsoft.com/en-us/powershell/scripting/install/installing-powershell?view=powershell-7.4). + +Once these tools have been installed, run `build.ps1` at `/src/munchkin/build.ps1`. This will initialize a Python venv, build the Rust projects, install the resultant wheel into that environment and run the tests. + +#### LLVM + +If you want to customize how LLVM is built/found, the script has environment variables for a variety of ways to do so.
The main ones are: + +```bash +MK_LLVM_EXTERNAL_DIR=/path/to/llvm # Directory of a locally-built LLVM. +MK_DOWNLOAD_LLVM=true # Whether to download a pre-built LLVM instead of building it. +MK_CACHE_DIR=/where/to/extract # Where to store the downloaded LLVM build. Defaults to `target`, which gets cleared on clean. +... +``` + +#### Potential issues + +[PyCharm] + +To get PyCharm to recognize the LLVM file path you need to add `LLVM_SYS_140_PREFIX={path_to_repo}/src/munchkin/target/llvm14-0` to the environment variables for any Rust command. You can also use a config.toml with the same value. + +[Windows] + +The main issue is to do with path lengths. These two changes may be needed: + +* Open the registry, go to `HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\FileSystem` and set `LongPathsEnabled` to 1. +* Enable long file names via git by running this: `git config --system core.longpaths true`. This will set it for every user on the system; to set it only for your user, use `--global`. diff --git a/code_of_conduct.md b/code_of_conduct.md new file mode 100644 index 0000000..f8a34f8 --- /dev/null +++ b/code_of_conduct.md @@ -0,0 +1,61 @@ +# Code of Conduct + +We, the community of contributors and maintainers, are dedicated to nurturing an atmosphere of inclusivity and respect within this project. +Our objective is to establish a space which welcomes individuals of all backgrounds and identities, and is safe and respectful for everyone regardless of ethnicity, +gender identity and expression, level of experience, age, disability, nationality, personal appearance, race, religion, or sexual orientation. + +## Our Commitment + +With the aim of nurturing an open and inviting space, we commit to: + +- Respectfulness: Interacting with consideration and kindness, recognizing the worth of each person's input. +- Inclusiveness: Embracing contributions from individuals of all backgrounds and actively seeking involvement from + those who have been traditionally underrepresented in the field. +- Patience: Demonstrating understanding and empathy towards varying levels of experience and viewpoints. +- Constructive Interaction: Engaging in conversations that offer positive and productive outcomes. + +## Unacceptable Behaviour + +Actions that will not be tolerated include, but are not limited to: + +- Harassment: Engaging in discriminatory behaviour, intimidation, or any form of harassment. +- Derogatory Remarks: Making negative or disrespectful comments that hinder constructive discussions. +- Personal Attacks: Undermining a collaborative environment through attacking or belittling others. +- Spam and Irrelevance: Distributing unsolicited content or repeatedly sharing irrelevant information. +- Discrimination in Any Form: Discriminating behaviour or conduct relating to, but not limited to, any of the + aforementioned characteristics. + +## Our Responsibilities + +The project maintainers are entrusted with upholding this code of conduct. +Enforcing this code is crucial for maintaining a positive, welcoming and productive environment. +Instances of inappropriate behaviour can be reported to the project team using this +[form](https://docs.google.com/forms/d/e/1FAIpQLSeyEX_txP3JDF3RQrI3R7ilPHV9JcZIyHPwLLlF6Pz7iGnocw/viewform?usp=sf_link). +All grievances will be reviewed confidentially, punctually and impartially. +The moderators have the authority to edit, remove, or decline comments, commits, code, issues, and other contributions +that conflict with this code of conduct.
+Contributors who disregard this code may face temporary or permanent consequences, as determined by the project team. + +## Applicability + +This code of conduct is applicable within project spaces, encompassing project repositories, issue trackers, +communication channels, mailing lists, social media platforms and public events. + +## Enforcement guidelines + +The project team will use the following steps to enforce the standards of conduct. + +- Warning: In the case of a single incident, the project team will issue a warning to the offending party. +- Temporary Ban: If the offending party does not heed the warning and repeats behaviour which violates the code of + conduct, the offending party will be temporarily banned. + This will prohibit the offending party from any interaction with the community for a specified period of time. + Violating this may result in a permanent ban. +- Permanent Ban: Sustained behaviour which violates the code of conduct will not be tolerated and will result in a + permanent ban. + +Please note that the project team will, at its absolute discretion, depending on the nature of the offence, decide the +appropriate consequences for the offending party and that the listed steps are only guidelines and may not necessarily +be applied in the presented order (e.g. a permanent ban can be issued without preceding warnings if the offence warrants +it). +If the project team bans a contributor and you consider this to be unjustified, please communicate this to the project +moderators in private by emailing qat_moderators. diff --git a/contributor_license_agreement.md b/contributor_license_agreement.md new file mode 100644 index 0000000..cba275c --- /dev/null +++ b/contributor_license_agreement.md @@ -0,0 +1,50 @@ + +## Contributor License Agreement (CLA) + +We are incredibly thankful for the contributions we receive from the community. + +If you would like to contribute to our project, you must first agree to our Contributor License Agreement (CLA). This allows a contributor to retain their ownership in the code submitted while granting us the necessary legal rights to use that contribution. By contributing to OQC’s Qat project (Project), and in consideration of being granted permission to be involved in it and to publicise your contributions through it, you agree to the following CLA terms and conditions, which shall apply to all your past, present and future contributions to the project. + +Agreeing to this CLA explicitly states that you are entitled to provide a contribution (Contribution), that you cannot withdraw permission to use your Contribution at a later date, and that you grant OQC (as defined below), its licensees, successors and assigns permission to use your Contribution in commercial products without further reference or any payment to you. + +## Definitions + +“You” (or “Your”) shall mean the copyright owner or legal entity authorised by the copyright owner that is making this CLA with the Project. For legal entities, the entity making a Contribution and all other entities that control, are controlled by, or are under common control with that entity are considered to be a single Contributor. For the purposes of this definition, “control” means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
+ +“Contribution” shall mean the code, documentation or other original works of authorship, including any modifications or additions to an existing work, that is intentionally submitted by You to the Project for inclusion in, or documentation of, any of the products owned or managed by the Project (the “Work”). For the purposes of this definition, “submitted” means any form of electronic, verbal, or written communication or upload sent to the Project or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Project for the purpose of discussing and improving the Work. + +“OQC” means Oxford Quantum Circuits Limited (company 10803852) of Thames Valley Science Park 1 Collegiate Square, South Ave, Reading, United Kingdom, RG2 9LH. + +## License + +You hereby represent that all present, past and future contributions are governed by the BSD-3 copyright statement unless stipulated otherwise. This entails that to the extent possible under law, you transfer all copyright and related or neighboring rights of the code or documents you contribute to the Project itself or its maintainers. Furthermore, you also represent that you have the authority to perform the above waiver with respect to the entirety of your Contributions. + +You hereby grant to OQC and to recipients of software distributed by the Project a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare derivative works of, publicly display, publicly perform, sublicense, and distribute Your Contributions and such derivative works. Such license shall be freely assignable and sub-licensable. + +OQC may add to or change these Terms and Conditions without notice including updating, amending, or varying this agreement or introducing a new licensing arrangement. As such You agree that OQC is entitled to amend these Terms and Conditions at any time without Your prior permission. + +## Moral Rights + +To the fullest extent permitted under applicable law, you hereby waive, and agree not to assert, any or all of your “moral rights” in or relating to Your Contributions for the benefit of the project. + +## Third Party Content + +If Your Contribution includes or is based on any source code, object code, bug fixes, configuration changes, tools, specifications, documentation, data, materials, feedback, information or other works of authorship that were not authored by you (“Third Party Content”) or if You are aware of any third party intellectual property or proprietary rights associated with your Contribution (“Third Party Rights”), then you agree to include with the submission of Your Contribution full details respecting such Third Party Content and Third Party Rights, including, without limitation, identification of which aspects of your Contribution contain Third Party Content or are associated with Third Party Rights, the owner/author of the Third Party Content and Third Party Rights, where You obtained the Third Party Content, and any applicable third party license terms or restrictions respecting the Third Party Content and Third Party Rights. If use of any Third Party Content requires a license then you must obtain one. For greater certainty, the foregoing obligations respecting the identification of Third Party Content and Third Party Rights do not apply to any portion of a Project that is incorporated into Your Contribution to that same Project.
+ +## Representations + +You represent that, other than the Third Party Content and Third Party Rights identified by you in accordance with this Agreement, you are the sole author of your Contributions and are legally entitled to grant the foregoing licenses and waivers in respect of your Contributions. If your Contributions were created in the course of your employment with your past or present employer(s), you represent that such employer(s) has authorised you to make your Contributions on behalf of such employer(s) or such employer(s) has waived all of their right, title or interest in or to your Contributions. + +## Disclaimer + +To the fullest extent permitted under applicable law, your Contributions are provided on an "as is" basis, without any warranties or conditions, express or implied, including, without limitation, any implied warranties or conditions of non-infringement, merchantability or fitness for a particular purpose. You are not required to provide support for your Contributions, except to the extent you desire to provide support. + +## No Obligation +You acknowledge that OQC is under no obligation to use or incorporate your contributions into the Project. The decision to use or incorporate your contributions into the Project will be made at the sole discretion of OQC or its authorised delegates. + +## Severability +If any term of this CLA is held invalid or unenforceable for any reason, the remainder of the term and this CLA will continue in full force and effect. + +## Law and jurisdiction +This CLA and any dispute or claim (including non-contractual disputes or claims) arising out of or in connection with it or its subject matter or formation shall be governed by and construed in accordance with the law of England and Wales. OQC and You irrevocably agree that the courts of England and Wales shall have exclusive jurisdiction to settle any dispute or claim (including non-contractual disputes or claims) arising out of or in connection with this CLA or its subject matter or formation provided that OQC may apply for an injunction or other interim relief and enforce a judgment awarded it by the English courts in any court of competent jurisdiction. + diff --git a/development.md b/development.md new file mode 100644 index 0000000..5e78836 --- /dev/null +++ b/development.md @@ -0,0 +1,20 @@ +### Rust++ + +Our Rust code is actually more like C++ due to some fun pointer manipulation we do and heavily interlinked data structures. + +The main points to know are: + +1. We have a custom ref-counted smart-pointer that acts like any other ref-counted smart pointer whose backing is a raw pointer (a minimal sketch of the idea follows below). +2. The smart-pointer can point to anything pointer-like: actual pointers, references, mutable references, anything you can actually get a pointer to. +3. It uses macros to manipulate/call/fetch the pointers directly and avoid pointer-to-ref compiler issues. +4. You can mutate anything at any time through the smart-pointer and its macros, so mutability keywords are irrelevant. + +In most situations you can just treat our smart-pointer like a normal `Rc` and don't need to care about its internals. +Except if you steal a reference, then it's on you to make sure the memory is not referenced outside its lifetime - the old fashioned way. + +Otherwise you can just write Rust as normal, and anything not within a smart-pointer still has the usual rules.
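+
+To make that concrete, here is a deliberately minimal sketch of a ref-counted pointer whose backing is a raw pointer. This is _not_ Munchkin's actual smart-pointer (the real one adds `UnsafeCell`-based mutation, macro helpers, and can wrap anything pointer-like); it only illustrates the general shape:
+
+```rust
+use std::cell::Cell;
+use std::ops::Deref;
+
+// Heap block holding the shared count alongside the value.
+struct Inner<T> {
+    count: Cell<usize>,
+    value: T,
+}
+
+// A minimal Rc-alike whose backing really is a raw pointer.
+struct RawRc<T> {
+    inner: *mut Inner<T>,
+}
+
+impl<T> RawRc<T> {
+    fn new(value: T) -> Self {
+        // Leak a Box to get a stable raw pointer that we manage ourselves.
+        let inner = Box::into_raw(Box::new(Inner { count: Cell::new(1), value }));
+        RawRc { inner }
+    }
+}
+
+impl<T> Clone for RawRc<T> {
+    fn clone(&self) -> Self {
+        // Safety: `inner` stays live while any RawRc pointing at it exists.
+        unsafe { (*self.inner).count.set((*self.inner).count.get() + 1) };
+        RawRc { inner: self.inner }
+    }
+}
+
+impl<T> Deref for RawRc<T> {
+    type Target = T;
+    fn deref(&self) -> &T {
+        unsafe { &(*self.inner).value }
+    }
+}
+
+impl<T> Drop for RawRc<T> {
+    fn drop(&mut self) {
+        unsafe {
+            let count = (*self.inner).count.get() - 1;
+            (*self.inner).count.set(count);
+            if count == 0 {
+                // Last owner: rebuild the Box so the allocation is freed.
+                drop(Box::from_raw(self.inner));
+            }
+        }
+    }
+}
+
+fn main() {
+    let a = RawRc::new(42);
+    let b = a.clone();
+    assert_eq!(*a, *b);
+}
+```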
+ +Note: If you are concerned about this or are surprised it works at all - raw pointers have special designation, especially those in `UnsafeCell`s. +We are leaning upon some pretty niche documented constraints to keep within the bounds of Rust's expectations, if barely, but they are all official. + +The language _almost_ works for what we need, so we bend it to meet that. diff --git a/docs/Munchkin Draft v2.pdf b/docs/Munchkin Draft v2.pdf new file mode 100644 index 0000000..da4e66d Binary files /dev/null and b/docs/Munchkin Draft v2.pdf differ diff --git a/examples.md b/examples.md new file mode 100644 index 0000000..9a04da1 --- /dev/null +++ b/examples.md @@ -0,0 +1,105 @@ +### Getting started + +To run Munchkin you'll need a QIR file, whether in its human-readable .ll form or bitcode. +We have some pre-built QIR that we use for tests (`src/munchkin/tests/qsharp`) that you can use and modify if needed. + +For our first example we're going to use the default simulator backend, so we will not delve into the details required to add your own. + +Since we're not providing a custom backend, our Python to run Munchkin is relatively simple. +If your QIR has no return value or arguments, this is how you call it: +```python +from munchqin.simulators import fetch_qasm_runtime + +# Create a QASM simulation backend with 20 qubits available. +runtime = fetch_qasm_runtime(20) +runtime.run("path_to_ll_file") +``` + +If your QIR entry-point has arguments, let's say a string and an int, this is what you need: +```python +# ... +runtime.run("path_to_ll_file", ["arg_1", 5]) +``` + +And finally if your call also returns a value, it would look like this: +```python +# ... +results = runtime.run("path_to_ll_file", ["arg_1", 5]) +assert results == 42 +``` + +This then runs your QIR locally and fires off any quantum fragments to the backend simulator. + +Note: while Munchkin accepts full-spec QIR and most classical LLVM instructions, it doesn't allow system calls such as I/O or sockets. +Such calls will simply be ignored and, if used in core logic, will cause an exception. + +If you want to use such things they have to be passed in as arguments or done outside Munchkin's execution loop. + +### Backends + +If you want to intercept Munchkin's quantum executions and redirect them for your own uses, you will need two additional objects - the `BuilderAdaptor` and `RuntimeAdaptor`. + +Both of these are shim APIs whose methods Munchkin expects to exist on whatever Python object you pass it. + +`BuilderAdaptor` is a gate- and instruction-level API that will be called to build your circuit after Munchkin knows what's needed and wants to get a result. +It's called sequentially with all the gates and instructions that are going to be used with the incoming execution. + +`RuntimeAdaptor` is the execution API. It will be called with the builder to be executed and will wait for a result. Results must be returned as a bit-string count distribution: + +```python +{ + "010": 250, + "111": 182, + ... +} +``` + +Both APIs can be found [here](https://github.com/oqc-community/munchkin/blob/develop/src/munchkin/pykin/pykin/adaptors.py). + +Now that you have a custom builder and runtime, all you need to do is make sure they get run correctly. This is where you would use a `MunchkinRuntime` to help: +```python +from munchqin.adaptors import BuilderAdaptor, RuntimeAdaptor +from munchqin.runtime import MunchkinRuntime + +class CustomBuilder(BuilderAdaptor): + ... + +class CustomRuntime(RuntimeAdaptor): + ...
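+
+# As a rough sketch only (the method names come from the adaptor shims
+# linked above; everything else here is hypothetical), a builder might
+# simply record the gates it is handed, and a runtime might translate
+# them for your own stack before returning counts:
+#
+#     class CustomBuilder(BuilderAdaptor):
+#         def __init__(self):
+#             self.gates = []
+#
+#         def x(self, qubit, radii):
+#             self.gates.append(("x", qubit, radii))
+#
+#         # ...the remaining BuilderAdaptor methods follow the same pattern.
+#
+#     class CustomRuntime(RuntimeAdaptor):
+#         def execute(self, builder):
+#             # Hand builder.gates to your own QPU/simulator here; the result
+#             # must be a bit-string count distribution like {"00": 512, ...}.
+#             return run_on_my_backend(builder.gates)  # hypothetical helper
+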
+ +runtime = MunchkinRuntime(CustomBuilder(), CustomRuntime()) +runtime.run("path_to_qir") +``` + +The `fetch_qasm_runtime` method we used earlier is simply a wrapper which loads in our QASM builder and runtime. + +With that, our custom classes will now be called whenever a quantum execution is needed (provided we've filled in the various methods). + +If you'd like a template, our [QASM backends](https://github.com/oqc-community/munchkin/blob/develop/src/munchkin/pykin/pykin/simulators.py) can provide one. + +### Debugging + +Symbolic execution engines are complicated by their nature, so debugging can be a little tricky unless you understand their output. + +The runtime itself exposes various tracing mechanisms that you can activate for a run: +```python +from munchqin.runtime import MunchkinRuntime + +runtime = MunchkinRuntime(...) + +# Prints out every step the runtime takes. +runtime.trace_runtime() + +# Prints out all the information of a quantum projection's execution and analysis. +# (The thing which compresses and then sends the circuit to the builders) +runtime.trace_projections() + +# Outputs the entire graph that we're going to run. +runtime.trace_graphs() +``` + +By default, these are all printed to the console. You can initialize Munchkin's file logging mechanism by calling `initialize_logger` with a file path. +This is recommended if you enable traces as it produces a _lot_ of output. + +It should also be mentioned that traces are not lightweight and should only be used for debugging or informational purposes. +They should not be left on in a live system. \ No newline at end of file diff --git a/src/munchkin/.markdownlint.yaml b/src/munchkin/.markdownlint.yaml new file mode 100644 index 0000000..24bd932 --- /dev/null +++ b/src/munchkin/.markdownlint.yaml @@ -0,0 +1,19 @@ +default: true +line-length: false + +# Allow bare URLs since they work well in GitHub renderings. +no-bare-urls: false + +# Require unordered lists to be typeset with dashes. +ul-style: + style: dash + +# Allow duplicate headings if they are not siblings. +no-duplicate-heading: + siblings_only: true + +no-inline-html: + allowed_elements: + # We want to enable a more readable alternative to the "two spaces at the + # end of a line" default in Markdown. + - br diff --git a/src/munchkin/Cargo.toml b/src/munchkin/Cargo.toml new file mode 100644 index 0000000..e318595 --- /dev/null +++ b/src/munchkin/Cargo.toml @@ -0,0 +1,3 @@ +[workspace] +members = ["pykin", "build-llvm"] +resolver = "2" diff --git a/src/munchkin/build-llvm/CMakeLists.txt b/src/munchkin/build-llvm/CMakeLists.txt new file mode 100644 index 0000000..5a7b292 --- /dev/null +++ b/src/munchkin/build-llvm/CMakeLists.txt @@ -0,0 +1,86 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +cmake_minimum_required(VERSION 3.10.0) + +project(qirlib_external) +include(ExternalProject) + +# Environment variables +# Download vars +# - MK_DOWNLOAD_LLVM => If set, download pre-built LLVM binaries. +# - MK_LLVM_BUILDS_URL => URL root where downloads are hosted.
+# - MK_LLVM_PKG_NAME => name of package to be downloaded/created +# - CPACK_PACKAGE_FILE_NAME +# Build vars +# - MK_LLVM_TAG +# Both +# - CMAKE_INSTALL_PREFIX + +if ($ENV{MK_DOWNLOAD_LLVM} MATCHES "true") + message (STATUS "Install LLVM to ${CMAKE_INSTALL_PREFIX}") + + set(SHA256_NAME $ENV{MK_LLVM_PKG_NAME}.sha256) + message (STATUS "Download file: $ENV{MK_LLVM_BUILDS_URL}/$ENV{MK_LLVM_PKG_NAME}") + message (STATUS "Download sha:$ENV{MK_LLVM_BUILDS_URL}/${SHA256_NAME}") + + # download the expected archive hash + file(DOWNLOAD "$ENV{MK_LLVM_BUILDS_URL}/${SHA256_NAME}" ${CMAKE_BINARY_DIR}/${SHA256_NAME} STATUS SHA256_DOWNLOAD_STATUS) + list(GET SHA256_DOWNLOAD_STATUS 0 SHA256_DOWNLOAD_STATUS) + if (NOT (${SHA256_DOWNLOAD_STATUS} EQUAL 0)) + list(GET SHA256_DOWNLOAD_STATUS 1 ERROR_MESSAGE) + message(FATAL_ERROR "Failed to download sha256 hash: ${ERROR_MESSAGE}") + endif () + + # load the expected archive hash into a variable for cmake to check + file(STRINGS ${CMAKE_BINARY_DIR}/${SHA256_NAME} LLVM_SHA256_SUM REGEX [0-9a-fA-F]+) + string(STRIP ${LLVM_SHA256_SUM} LLVM_SHA256_SUM) + + ExternalProject_Add(llvm + URL $ENV{MK_LLVM_BUILDS_URL}/$ENV{MK_LLVM_PKG_NAME} + URL_HASH SHA256=${LLVM_SHA256_SUM} + CONFIGURE_COMMAND "" + SOURCE_DIR ${CMAKE_INSTALL_PREFIX} + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" + USES_TERMINAL_DOWNLOAD TRUE + ) +else() + # The outer cmake call from rust will configure CMAKE_INSTALL_PREFIX + # Unless the user overrides the install path rust will configure it to + # be the crate OUT_DIR. + # The build will define CPACK_PACKAGE_FILE_NAME passing it to the LLVM build. + # Set large path limit for windows when it doesn't infer correctly. + set (EXTERNAL_EXTRA_CMAKE_ARGS) + list (APPEND EXTERNAL_EXTRA_CMAKE_ARGS + -DCMAKE_INSTALL_PREFIX=${CMAKE_INSTALL_PREFIX} + -DCMAKE_OBJECT_PATH_MAX=2000 + -DCPACK_PACKAGE_FILE_NAME=${CPACK_PACKAGE_FILE_NAME} + ) + + ExternalProject_Add(llvm + GIT_REPOSITORY https://github.com/llvm/llvm-project.git + GIT_TAG $ENV{MK_LLVM_TAG} + GIT_SHALLOW TRUE + GIT_PROGRESS TRUE + SOURCE_SUBDIR llvm + CONFIGURE_COMMAND ${CMAKE_COMMAND} -G Ninja -C ${CMAKE_CURRENT_LIST_DIR}/config.cmake ${EXTERNAL_EXTRA_CMAKE_ARGS} + BUILD_COMMAND ninja + USES_TERMINAL_DOWNLOAD TRUE + USES_TERMINAL_CONFIGURE TRUE + USES_TERMINAL_BUILD TRUE + USES_TERMINAL_INSTALL TRUE + ) + + ExternalProject_Add_Step(llvm package + COMMAND ninja package + WORKING_DIRECTORY + COMMENT "package llvm" + DEPENDEES build configure + ALWAYS TRUE + EXCLUDE_FROM_MAIN TRUE + USES_TERMINAL TRUE + ) + ExternalProject_Add_StepTargets(llvm package) +endif() \ No newline at end of file diff --git a/src/munchkin/build-llvm/Cargo.toml b/src/munchkin/build-llvm/Cargo.toml new file mode 100644 index 0000000..1593885 --- /dev/null +++ b/src/munchkin/build-llvm/Cargo.toml @@ -0,0 +1,52 @@ +[package] +name = "build-llvm" +version = "0.1.0" +edition = "2021" +license = "" +description = "" +build = "build.rs" +readme = "README.md" +homepage = "" +repository = "" +rust-version = "1.64" + +[dependencies] +bitvec = "1.0" +const-str = "0.5" +lazy_static = "1.4" +llvm-sys-140 = { package = "llvm-sys", version = "140.0", optional = true } +log = "0.4" +mut_static = "5.0" + +[dev-dependencies] +serial_test = "0.9" +normalize-line-endings = "0.3" + +[build-dependencies] +cmake = "0.1" +cc = "1.0" +lazy_static = "1.4" + +[features] +llvm14-0 = ["llvm-sys-140"] + +# default to use llvm-sys for llvm linking +default = ["external-llvm-linking", "llvm14-0"] +external-llvm-linking = [] + +# disable linking
for local installation or packaging +# no-llvm-linking is a marker used in the cfg checks +llvm14-0-no-llvm-linking = ["llvm14-0", "no-llvm-linking", "llvm-sys-140/disable-alltargets-init", "llvm-sys-140/no-llvm-linking"] + +no-llvm-linking = [] + +# let us do the llvm linking +# internal-llvm-linking is a marker used in the cfg checks +llvm14-0-qirlib-llvm-linking = ["llvm14-0", "internal-llvm-linking", "llvm-sys-140/disable-alltargets-init", "llvm-sys-140/no-llvm-linking"] + +internal-llvm-linking = [] +download-llvm = [] +build-llvm = [] + +# Dev use only for packaging LLVM builds +package-llvm = ["build-llvm", "no-llvm-linking"] diff --git a/src/munchkin/build-llvm/build.rs b/src/munchkin/build-llvm/build.rs new file mode 100644 index 0000000..2123f4c --- /dev/null +++ b/src/munchkin/build-llvm/build.rs @@ -0,0 +1,352 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +use std::boxed::Box; +use std::env; +use std::error::Error; +use std::fs; +use std::path::{Path, PathBuf}; + +use cc::Build; +use cmake::Config; + +mod external; +use external::llvm_sys; + +extern crate cc; +#[macro_use] +extern crate lazy_static; + +// Make sure one version of llvm features is used +#[cfg(all( + not(any(feature = "llvm11-0")), + not(any(feature = "llvm12-0")), + not(any(feature = "llvm13-0")), + not(any(feature = "llvm14-0")), +))] +compile_error!("One of the features `llvm11-0`, `llvm12-0`, `llvm13-0`, and `llvm14-0` must be used exclusively."); + +// Make sure only one llvm option is used. +#[cfg(any( + all( + feature = "llvm11-0", + any(feature = "llvm12-0", feature = "llvm13-0", feature = "llvm14-0") + ), + all( + feature = "llvm12-0", + any(feature = "llvm11-0", feature = "llvm13-0", feature = "llvm14-0") + ), + all( + feature = "llvm13-0", + any(feature = "llvm11-0", feature = "llvm12-0", feature = "llvm14-0") + ), + all( + feature = "llvm14-0", + any(feature = "llvm11-0", feature = "llvm12-0", feature = "llvm13-0") + ), +))] +compile_error!("Features `llvm11-0`, `llvm12-0`, `llvm13-0`, and `llvm14-0` are mutually exclusive."); + +// Make sure one of the linking features is used +#[cfg(all( + not(any(feature = "qirlib-llvm-linking")), + not(any(feature = "external-llvm-linking")), + not(any(feature = "no-llvm-linking")), +))] +compile_error!("One of the features `qirlib/qirlib-llvm-linking`, `qirlib/external-llvm-linking`, and `qirlib/no-llvm-linking` must be used exclusively."); + +// Make sure only one linking option is used.
+#[cfg(any( + all( + feature = "qirlib-llvm-linking", + any(feature = "external-llvm-linking", feature = "no-llvm-linking") + ), + all( + feature = "external-llvm-linking", + any(feature = "qirlib-llvm-linking", feature = "no-llvm-linking") + ), + all( + feature = "no-llvm-linking", + any(feature = "qirlib-llvm-linking", feature = "external-llvm-linking") + ), +))] +compile_error!("Features `qirlib/qirlib-llvm-linking`, `qirlib/external-llvm-linking`, and `qirlib/no-llvm-linking` are mutually exclusive."); + +// if we are building or downloading, we cannot be externally linking +#[cfg(any( + all( + feature = "build-llvm", + any(feature = "download-llvm", feature = "external-llvm-linking") + ), + all( + feature = "download-llvm", + any(feature = "build-llvm", feature = "external-llvm-linking") + ), +))] +compile_error!("Features `qirlib/build-llvm` and `qirlib/download-llvm` are mutually exclusive."); + +fn main() -> Result<(), Box> { + println!("cargo:rerun-if-changed=build.rs"); + println!("cargo:rerun-if-changed=config.cmake"); + println!("cargo:rerun-if-changed=CMakeLists.txt"); + + let install_dir = get_llvm_install_dir(); + println!("cargo:rerun-if-changed={:?}", install_dir); + + // llvm-sys components + println!("cargo:rerun-if-changed=external.rs"); + + // Download vars passed to cmake + println!("cargo:rerun-if-env-changed=MK_DOWNLOAD_LLVM"); + println!("cargo:rerun-if-env-changed=MK_LLVM_BUILDS_URL"); + println!("cargo:rerun-if-env-changed=MK_LLVM_PKG_NAME"); + + // Package vars used only in here + println!("cargo:rerun-if-env-changed=MK_PKG_DEST"); + + // Build vars passed to cmake + println!("cargo:rerun-if-env-changed=MK_LLVM_TAG"); + + // maps to CPACK_PACKAGE_FILE_NAME + println!("cargo:rerun-if-env-changed=MK_PACKAGE_FILE_NAME"); + + // maps to CMAKE_INSTALL_PREFIX passed to cmake in build and download + println!("cargo:rerun-if-env-changed=MK_CACHE_DIR"); + + if cfg!(feature = "download-llvm") { + println!("Downloading llvm"); + download_llvm()?; + } else if cfg!(feature = "build-llvm") { + println!("Building llvm"); + compile_llvm()?; + } + if cfg!(feature = "qirlib-llvm-linking") { + println!("Linking llvm"); + link_llvm(); + let build_dir = get_build_dir()?; + compile_target_wrappers(&build_dir)?; + } else if cfg!(feature = "external-llvm-linking") { + println!("LLVM_SYS_{{}}_PREFIX will provide the LLVM linking"); + } else { + println!("No LLVM linking"); + } + + Ok(()) +} + +fn download_llvm() -> Result<(), Box> { + // If the download url isn't set, we need to immediately fail. + let url = env::var("MK_LLVM_BUILDS_URL")?; + + let enable_download = env::var("MK_DOWNLOAD_LLVM").unwrap_or_else(|_| "true".to_owned()); + + let build_dir = get_build_dir()?; + + let mut config = Config::new(build_dir); + config + .generator("Ninja") + .no_build_target(true) + .env("MK_LLVM_PKG_NAME", get_package_file_name()?) + .env("MK_LLVM_BUILDS_URL", url) + .env("MK_DOWNLOAD_LLVM", enable_download) + .define("CPACK_PACKAGE_FILE_NAME", get_package_name()?) + .define("CMAKE_INSTALL_PREFIX", get_llvm_install_dir()) + .very_verbose(true); + let _ = config.build(); + + Ok(()) +} + +fn get_llvm_compile_target() -> String { + // We always install unless package is chosen. 
+ // The user's choices for CMAKE_INSTALL_PREFIX will choose whether + // the installation goes into the target folder for linking or + // into another dir for potential reuse + if cfg!(feature = "package-llvm") { + "llvm-prefix/src/llvm-stamp/llvm-package".to_owned() + } else { + "llvm-prefix/src/llvm-stamp/llvm-install".to_owned() + } +} + +fn compile_llvm() -> Result<(), Box> { + let build_dir = get_build_dir()?; + let mut config = Config::new(build_dir); + + config + .generator("Ninja") + .build_target(get_llvm_compile_target().as_str()) + .env("MK_LLVM_TAG", get_llvm_tag()) + .define("CPACK_PACKAGE_FILE_NAME", get_package_name()?) + .define("CMAKE_INSTALL_PREFIX", get_llvm_install_dir()); + let _ = config.build(); + + if cfg!(feature = "package-llvm") { + package_llvm()?; + } + Ok(()) +} + +fn package_llvm() -> Result<(), Box> { + let out_dir = env::var("OUT_DIR").expect("Could not get OUT_DIR environment variable"); + let output = PathBuf::from(out_dir) + .join("build") + .join("llvm-prefix") + .join("src") + .join("llvm-build") + .join(get_package_file_name()?); + + if let Ok(dest_dir) = env::var("MK_PKG_DEST") { + let dest = PathBuf::from(dest_dir).join(get_package_file_name()?); + println!( + "Moving {} to {}.", + output.as_path().display(), + dest.as_path().display() + ); + fs::rename(output, dest)?; + } else { + println!("Not moving package output. MK_PKG_DEST not set."); + } + + Ok(()) +} + +fn get_build_dir() -> Result> { + let manifest_dir = env::var("CARGO_MANIFEST_DIR")?; + let build_dir = PathBuf::from(manifest_dir.as_str()); + let normalized_build_dir = fs::canonicalize(build_dir)?; + println!( + "llvm build files dir: {}", + normalized_build_dir.to_str().unwrap() + ); + Ok(normalized_build_dir) +} + +fn link_llvm() { + let libdir = llvm_sys::llvm_config("--libdir"); + + // Export information to other crates + println!( + "cargo:config_path={}", + llvm_sys::LLVM_CONFIG_PATH.clone().unwrap().display() + ); // will be DEP_MK_CONFIG_PATH + println!("cargo:libdir={}", libdir); // DEP_MK_LIBDIR + + // Link LLVM libraries + println!("cargo:rustc-link-search=native={}", libdir); + for name in llvm_sys::get_link_libraries() { + println!("cargo:rustc-link-lib=static={}", name); + } + + // Link system libraries + for name in llvm_sys::get_system_libraries() { + println!("cargo:rustc-link-lib=dylib={}", name); + } +} + +fn compile_target_wrappers(build_dir: &Path) -> Result<(), Box> { + let target_c = build_dir.join("target.c").canonicalize()?; + env::set_var("CFLAGS", llvm_sys::get_llvm_cflags()); + Build::new().file(target_c).compile("targetwrappers"); + Ok(()) +} + +fn get_package_file_name() -> Result> { + let mut base_name = get_package_name()?; + + if llvm_sys::target_os_is("windows") { + base_name.push_str(".zip"); + } else { + base_name.push_str(".tar.gz"); + } + + Ok(base_name) +} + +fn get_llvm_tag() -> String { + if let Ok(tag) = env::var("MK_LLVM_TAG") { + tag + } else if cfg!(feature = "llvm11-0") { + "llvmorg-11.1.0".to_owned() // 1fdec59bf + } else if cfg!(feature = "llvm12-0") { + "llvmorg-12.0.1".to_owned() // fed4134 + } else if cfg!(feature = "llvm13-0") { + "llvmorg-13.0.1".to_owned() // 75e33f7 + } else if cfg!(feature = "llvm14-0") { + "llvmorg-14.0.6".to_owned() // 28c006 + } else { + panic!("Unsupported LLVM version.
The LLVM feature flags or MK_LLVM_TAG must be set.") + } +} + +fn get_package_name() -> Result> { + if let Ok(file_name) = env::var("MK_PACKAGE_FILE_NAME") { + Ok(file_name) + } else { + let tag = get_llvm_tag(); + let triple = get_target_triple()?; + let package_name = format!("mk-llvm-{}-{}", triple, tag); + Ok(package_name) + } +} + +fn get_target_triple() -> Result> { + let target = if llvm_sys::target_os_is("windows") { + // TODO: remove static linking and just return the TARGET + "x86_64-pc-windows-msvc-static".to_owned() + } else { + env::var("TARGET")? + }; + Ok(target) +} + +fn get_llvm_install_dir() -> PathBuf { + if let Ok(path) = env::var("MK_CACHE_DIR") { + PathBuf::from(path) + } else { + // if we install to OUT_DIR the llvm install task during the extraction + // of the archive will empty the target directory breaking the build. + // To avoid that, we put llvm binaries into the OUT_DIR/llvm folder. + let out_dir = env::var("OUT_DIR").expect("Could not get OUT_DIR environment variable"); + PathBuf::from(out_dir).join("llvm") + } +} + +fn locate_llvm_config() -> Option { + let major = if cfg!(feature = "llvm11-0") { + "11" + } else if cfg!(feature = "llvm12-0") { + "12" + } else if cfg!(feature = "llvm13-0") { + "13" + } else if cfg!(feature = "llvm14-0") { + "14" + } else { + "unknown" + }; + if let Ok(path) = env::var(format!("DEP_LLVM_{major}_CONFIG_PATH")) { + Some(PathBuf::from(path)) + } else { + let dir = get_llvm_install_dir(); + println!("Looking in {:?}", dir); + let prefix = dir.join("bin"); + let binary_name = llvm_config_name(); + let binary_path = prefix.join(binary_name); + if binary_path.as_path().exists() { + Some(binary_path) + } else { + None + } + } +} + +pub fn llvm_config_name() -> String { + let mut base_name = "llvm-config".to_owned(); + + // On Windows, also search for llvm-config.exe + if llvm_sys::target_os_is("windows") { + base_name.push_str(".exe"); + } + + base_name +} diff --git a/src/munchkin/build-llvm/config.cmake b/src/munchkin/build-llvm/config.cmake new file mode 100644 index 0000000..6b3445b --- /dev/null +++ b/src/munchkin/build-llvm/config.cmake @@ -0,0 +1,93 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
+ +message(STATUS CMAKE_HOST_SYSTEM_NAME=${CMAKE_HOST_SYSTEM_NAME}) + +if (${CMAKE_HOST_SYSTEM_NAME} MATCHES "Windows") + find_program(SCCACHE sccache) + if(SCCACHE) + set(LLVM_CCACHE_BUILD OFF CACHE BOOL "") + set_property(GLOBAL PROPERTY RULE_LAUNCH_COMPILE "${SCCACHE}") + get_property(rule_launch_property GLOBAL PROPERTY RULE_LAUNCH_COMPILE) + message(STATUS RULE_LAUNCH_COMPILE=${rule_launch_property}) + set(CMAKE_C_COMPILER_LAUNCHER "${SCCACHE}" CACHE STRING "") + message(STATUS CMAKE_C_COMPILER_LAUNCHER=${CMAKE_C_COMPILER_LAUNCHER}) + set(CMAKE_CXX_COMPILER_LAUNCHER "${SCCACHE}" CACHE STRING "") + message(STATUS CMAKE_CXX_COMPILER_LAUNCHER=${CMAKE_CXX_COMPILER_LAUNCHER}) + else() + message(STATUS "Not using sccache") + endif() +else() + # Prepare cache if we find it + find_program(CCACHE ccache) + if(CCACHE) + message(STATUS "Using CCache for linux/darwin") + set(LLVM_CCACHE_BUILD ON CACHE BOOL "") + set(LLVM_CCACHE_DIR $ENV{CCACHE_DIR} CACHE STRING "") + set(LLVM_CCACHE_MAXSIZE "2G" CACHE STRING "") + message(STATUS CCACHE_DIR=$ENV{CCACHE_DIR}) + message(STATUS LLVM_CCACHE_BUILD=${LLVM_CCACHE_BUILD}) + message(STATUS LLVM_CCACHE_DIR=${LLVM_CCACHE_DIR}) + message(STATUS LLVM_CCACHE_MAXSIZE=${LLVM_CCACHE_MAXSIZE}) + else() + message(STATUS "Not using CCache") + endif() +endif() + +# Set up main build props + +set(CMAKE_BUILD_TYPE MinSizeRel CACHE STRING "") + +set(LLVM_TARGETS_TO_BUILD "Native" CACHE STRING "") + +set(PACKAGE_VENDOR LLVM.org CACHE STRING "") + +# Turn off +set(LLVM_ENABLE_ASSERTIONS OFF CACHE BOOL "") +set(LLVM_BUILD_EXAMPLES OFF CACHE BOOL "") +set(LLVM_ENABLE_RTTI OFF CACHE BOOL "") + +# Remove external lib dependencies +set(LLVM_ENABLE_LIBXML2 OFF CACHE BOOL "") +set(LLVM_ENABLE_LIBEDIT OFF CACHE BOOL "") +set(LLVM_ENABLE_LIBPFM OFF CACHE BOOL "") +set(LLVM_ENABLE_BINDINGS OFF CACHE BOOL "") +set(LLVM_ENABLE_OCAMLDOC OFF CACHE BOOL "") +set(LLVM_ENABLE_ZLIB OFF CACHE BOOL "") +set(LLVM_ENABLE_TERMINFO OFF CACHE BOOL "") + +# Packing +set(CPACK_BINARY_DEB OFF CACHE BOOL "") +set(CPACK_BINARY_FREEBSD OFF CACHE BOOL "") +set(CPACK_BINARY_IFW OFF CACHE BOOL "") +set(CPACK_BINARY_NSIS OFF CACHE BOOL "") +set(CPACK_BINARY_RPM OFF CACHE BOOL "") +set(CPACK_BINARY_STGZ OFF CACHE BOOL "") +set(CPACK_BINARY_TBZ2 OFF CACHE BOOL "") +set(CPACK_BINARY_TXZ OFF CACHE BOOL "") +set(CPACK_BINARY_TZ OFF CACHE BOOL "") + +if (${CMAKE_HOST_SYSTEM_NAME} MATCHES "Windows") + message(STATUS "Configuring for Windows") + set(LLVM_USE_CRT_RELEASE "MT" CACHE STRING "") + set(LLVM_USE_CRT_MINSIZEREL "MT" CACHE STRING "") + set(LLVM_BUILD_LLVM_C_DYLIB ON CACHE BOOL "") + set(CMAKE_INSTALL_UCRT_LIBRARIES ON CACHE BOOL "") + set(CPACK_BINARY_ZIP ON CACHE BOOL "") +else() + set(LLVM_BUILD_LLVM_DYLIB ON CACHE BOOL "") + set(CPACK_BINARY_TGZ ON CACHE BOOL "") +endif() + + +# Apple specific changes to match their toolchain +if(APPLE) + set(COMPILER_RT_ENABLE_IOS OFF CACHE BOOL "") + set(COMPILER_RT_ENABLE_WATCHOS OFF CACHE BOOL "") + set(COMPILER_RT_ENABLE_TVOS OFF CACHE BOOL "") + + set(CMAKE_MACOSX_RPATH ON CACHE BOOL "") + set(CLANG_SPAWN_CC1 ON CACHE BOOL "") + set(CMAKE_C_FLAGS "-fno-stack-protector -fno-common -Wno-profile-instr-unprofiled" CACHE STRING "") + set(CMAKE_CXX_FLAGS "-fno-stack-protector -fno-common -Wno-profile-instr-unprofiled" CACHE STRING "") +endif() diff --git a/src/munchkin/build-llvm/external.rs b/src/munchkin/build-llvm/external.rs new file mode 100644 index 0000000..5c10f09 --- /dev/null +++ b/src/munchkin/build-llvm/external.rs @@ -0,0 +1,202 @@ +/* +Copyright (c) 2015 
Peter Marheine + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +*/ + +// In order to make sure we perform LLVM linking the same way as llvm-sys, +// the following code is from its build.rs: +// https://github.com/tari/llvm-sys.rs/blob/master/build.rs +// Slight changes have been made to satisfy clippy and remove features +// that we don't currently support. +pub mod llvm_sys { + use std::{ + env, + ffi::OsStr, + io, + path::{Path, PathBuf}, + process::Command, + }; + + lazy_static! { + /// Filesystem path to an llvm-config binary for the correct version. + pub static ref LLVM_CONFIG_PATH: Option = crate::locate_llvm_config(); + } + + /// Get the output from running `llvm-config` with the given argument. + /// + /// Lazily searches for or compiles LLVM as configured by the environment + /// variables. + pub fn llvm_config(arg: &str) -> String { + llvm_config_ex(&*LLVM_CONFIG_PATH.clone().unwrap(), arg) + .expect("Surprising failure from llvm-config") + } + + /// Invoke the specified binary as llvm-config. + /// + /// Explicit version of the `llvm_config` function that bubbles errors + /// up. + pub fn llvm_config_ex>(binary: S, arg: &str) -> io::Result { + Command::new(binary) + .arg(arg) + .arg("--link-static") // Don't use dylib for >= 3.9 + .output() + .map(|output| { + String::from_utf8(output.stdout) + .expect("Output from llvm-config was not valid UTF-8") + }) + } + + pub fn get_llvm_cflags() -> String { + let output = llvm_config("--cflags"); + if target_env_is("msvc") { + // MSVC doesn't accept -W... options, so don't try to strip them and + // possibly strip something that should be retained. Also do nothing if + // the user requests it. + return output; + } + + llvm_config("--cflags") + .split(&[' ', '\n'][..]) + .filter(|word| !word.starts_with("-W")) + .collect::>() + .join(" ") + } + + pub fn get_llvm_cxxflags() -> String { + let output = llvm_config("--cxxflags"); + if target_env_is("msvc") { + // MSVC doesn't accept -W... options, so don't try to strip them and + // possibly strip something that should be retained. Also do nothing if + // the user requests it. 
+ return output; + } + + llvm_config("--cxxflags") + .split(&[' ', '\n'][..]) + .filter(|word| !word.starts_with("-W")) + .collect::>() + .join(" ") + } + + pub fn target_env_is(name: &str) -> bool { + match env::var_os("CARGO_CFG_TARGET_ENV") { + Some(s) => s == name, + None => false, + } + } + + pub fn target_os_is(name: &str) -> bool { + match env::var_os("CARGO_CFG_TARGET_OS") { + Some(s) => s == name, + None => false, + } + } + + /// Get the names of the dylibs required by LLVM, including the C++ standard + /// library. + pub fn get_system_libraries() -> Vec { + llvm_config("--system-libs") + .split(&[' ', '\n'] as &[char]) + .filter(|s| !s.is_empty()) + .map(|flag| { + if cfg!(target_env = "msvc") { + // Same as --libnames, foo.lib + assert!(flag.ends_with(".lib")); + &flag[..flag.len() - 4] + } else if cfg!(target_os = "macos") { + // Linker flags style, -lfoo + assert!(flag.starts_with("-l")); + if flag.ends_with(".tbd") && flag.starts_with("-llib") { + &flag[5..flag.len() - 4] + } else { + &flag[2..] + } + } else { + if flag.starts_with("-l") { + // Linker flags style, -lfoo + return flag + .strip_prefix("-l") + .expect("could not strip -l prefix") + .to_owned(); + } + + let maybe_lib = Path::new(&flag); + if maybe_lib.is_file() { + // Library on disk, likely an absolute path to a .so + if let Some(p) = maybe_lib.parent() { + println!("cargo:rustc-link-search={}", p.display()) + } + &maybe_lib.file_stem().unwrap().to_str().unwrap()[3..] + } else { + panic!("Unable to parse result of llvm-config --system-libs") + } + } + .to_owned() + }) + .chain(get_system_libcpp().map(str::to_owned)) + .collect::>() + } + + /// Get the library that must be linked for C++, if any. + pub fn get_system_libcpp() -> Option<&'static str> { + if cfg!(target_env = "msvc") { + // MSVC doesn't need an explicit one. + None + } else if cfg!(target_os = "macos") || cfg!(target_os = "freebsd") { + // On OS X 10.9 and later, LLVM's libc++ is the default. On earlier + // releases GCC's libstdc++ is default. Unfortunately we can't + // reasonably detect which one we need (on older ones libc++ is + // available and can be selected with -stdlib=lib++), so assume the + // latest, at the cost of breaking the build on older OS releases + // when LLVM was built against libstdc++. + Some("c++") + } else { + // Otherwise assume GCC's libstdc++. + // This assumption is probably wrong on some platforms, but would need + // testing on them. + Some("stdc++") + } + } + + /// Get the names of libraries to link against. + pub fn get_link_libraries() -> Vec { + // Using --libnames in conjunction with --libdir is particularly important + // for MSVC when LLVM is in a path with spaces, but it is generally less of + // a hack than parsing linker flags output from --libs and --ldflags. + llvm_config("--libnames") + .split(&[' ', '\n'] as &[char]) + .filter(|s| !s.is_empty()) + .map(|name| { + // --libnames gives library filenames. Extract only the name that + // we need to pass to the linker. 
+                if cfg!(target_env = "msvc") {
+                    // LLVMfoo.lib
+                    assert!(name.ends_with(".lib"));
+                    &name[..name.len() - 4]
+                } else {
+                    // libLLVMfoo.a
+                    assert!(name.starts_with("lib") && name.ends_with(".a"));
+                    &name[3..name.len() - 2]
+                }
+            })
+            .map(str::to_owned)
+            .collect::<Vec<String>>()
+    }
+}
diff --git a/src/munchkin/build-llvm/src/lib.rs b/src/munchkin/build-llvm/src/lib.rs
new file mode 100644
index 0000000..e69de29
diff --git a/src/munchkin/build.ps1 b/src/munchkin/build.ps1
new file mode 100644
index 0000000..08fdd05
--- /dev/null
+++ b/src/munchkin/build.ps1
@@ -0,0 +1,9 @@
+#!/usr/bin/env pwsh
+
+# Copyright (c) Microsoft Corporation.
+# Licensed under the MIT License.
+
+#Requires -PSEdition Core
+
+pwsh -NoProfile -NonInteractive -ExecutionPolicy Bypass -Command "& '$(Join-Path $pwd scripts build.ps1)' $args"
+exit $LASTEXITCODE
diff --git a/src/munchkin/mypy.ini b/src/munchkin/mypy.ini
new file mode 100644
index 0000000..cc4452f
--- /dev/null
+++ b/src/munchkin/mypy.ini
@@ -0,0 +1,9 @@
+[mypy]
+files = pykin
+strict = True
+
+[mypy-antlr4]
+ignore_missing_imports = True
+
+[mypy-mock_language.*]
+follow_imports = skip
diff --git a/src/munchkin/pykin/Cargo.toml b/src/munchkin/pykin/Cargo.toml
new file mode 100644
index 0000000..04636d8
--- /dev/null
+++ b/src/munchkin/pykin/Cargo.toml
@@ -0,0 +1,35 @@
+[package]
+authors = ["Oxford Quantum Circuits"]
+name = "pykin"
+version = "0.1.0"
+edition = "2021"
+license = ""
+description = ""
+readme = "README.md"
+homepage = ""
+repository = ""
+
+[dependencies.inkwell]
+git = "https://github.com/TheDan64/inkwell"
+branch = "master"
+default-features = false
+features = ["llvm14-0"]
+
+[dependencies]
+either = "1.8"
+libc = "0.2"
+const-str = "0.5"
+pyo3 = { version = "0.17", features = ["abi3-py37", "extension-module", "auto-initialize", "num-complex"] }
+llvm-sys = "140"
+regex = "1.7.1"
+log = "0.4.17"
+env_logger = "0.9.3"
+ctor = "0.2.2"
+num = "0.4.0"
+bitflags = "2.4.0"
+
+[lib]
+crate-type = ["cdylib"]
+
+[package.metadata.maturin]
+name = "pykin._native"
diff --git a/src/munchkin/pykin/MANIFEST.in b/src/munchkin/pykin/MANIFEST.in
new file mode 100644
index 0000000..becccf7
--- /dev/null
+++ b/src/munchkin/pykin/MANIFEST.in
@@ -0,0 +1,2 @@
+include pyproject.toml Cargo.toml
+recursive-include src *
diff --git a/src/munchkin/pykin/README.md b/src/munchkin/pykin/README.md
new file mode 100644
index 0000000..e69de29
diff --git a/src/munchkin/pykin/pykin/__init__.py b/src/munchkin/pykin/pykin/__init__.py
new file mode 100644
index 0000000..6a9a6ce
--- /dev/null
+++ b/src/munchkin/pykin/pykin/__init__.py
@@ -0,0 +1,3 @@
+from ._native import Executor, DEFAULT_LOG_FILE
+from .adaptors import BuilderAdaptor, RuntimeAdaptor
+from .utils import initialize_logger
diff --git a/src/munchkin/pykin/pykin/_native.pyi b/src/munchkin/pykin/pykin/_native.pyi
new file mode 100644
index 0000000..87e4df5
--- /dev/null
+++ b/src/munchkin/pykin/pykin/_native.pyi
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2024 Oxford Quantum Circuits Ltd
+
+from typing import Any, Optional, List
+
+from .adaptors import BuilderAdaptor, RuntimeAdaptor
+
+
+DEFAULT_LOG_FILE = ""
+
+def initialize_file_logger(file_path: str):
+    pass
+
+def initialize_commandline_logger():
+    pass
+
+
+class Graph:
+    ...
+
+
+class Executor:
+    def trace_graphs(self):
+        ...
+
+    def trace_runtime(self):
+        ...
+
+    def trace_projections(self):
+        ...
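+    # Illustrative sketch (ours): these entry points are normally driven through
+    # the MunchkinRuntime wrapper defined in pykin.runtime, e.g.
+    #
+    #     from pykin.simulators import fetch_qasm_runtime
+    #     runtime = fetch_qasm_runtime(qubit_count=2)
+    #     results = runtime.run("program.ll")  # "program.ll" is a hypothetical file
+    #
+    # which ultimately calls into run/run_with_args below.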
+ + def run( + self, + file_path: str, + builder: BuilderAdaptor, + runtime: RuntimeAdaptor + ) -> Any: + """ Runs this file using the automatically-detected entry-point with no arguments. """ + + def run_with_args( + self, + file_path: str, + arguments: List[Any], + builder: BuilderAdaptor, + runtime: RuntimeAdaptor + ) -> Any: + """ Runs this file using the automatically-detected entry-point. """ + + def parse_file( + self, + file: str, + entry_point: Optional[str] + ) -> Graph: + """ Evaluates and builds this file into the internal execution graph and returns it. """ + + def run_graph( + self, + graph: Graph, + arguments: List[Any], + builder_adaptor: BuilderAdaptor, + runtime_adaptor: RuntimeAdaptor + ) -> Any: + """ Runs a pre-built execution graph with the passed-in arguments. """ \ No newline at end of file diff --git a/src/munchkin/pykin/pykin/adaptors.py b/src/munchkin/pykin/pykin/adaptors.py new file mode 100644 index 0000000..f025ddc --- /dev/null +++ b/src/munchkin/pykin/pykin/adaptors.py @@ -0,0 +1,41 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2024 Oxford Quantum Circuits Ltd + +from typing import Dict + + +class BuilderAdaptor: + def cx(self, controls, target, radii): + ... + + def cz(self, controls, target, radii): + ... + + def cy(self, controls, target, radii): + ... + + def x(self, qubit, radii): + ... + + def y(self, qubit, radii): + ... + + def z(self, qubit, radii): + ... + + def swap(self, qubit1, qubit2): + ... + + def reset(self, qubit): + ... + + def measure(self, qubit): + ... + + def clear(self): + ... + + +class RuntimeAdaptor: + def execute(self, builder) -> Dict[str, int]: + ... diff --git a/src/munchkin/pykin/pykin/logger.py b/src/munchkin/pykin/pykin/logger.py new file mode 100644 index 0000000..83f0ed4 --- /dev/null +++ b/src/munchkin/pykin/pykin/logger.py @@ -0,0 +1,589 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2024 Oxford Quantum Circuits Ltd + +import atexit +import json +import logging +import os +import shutil +import sys +from datetime import date, datetime +from enum import Enum +from logging.config import dictConfig +from typing import IO, List, Union + + +# Formatted to "[INFO] 2020-08-25 19:54:28,216 (module_name.function_name:line_number) - message" +default_logger_format = ( + "[%(levelname)s] %(asctime)s (%(module)s.%(funcName)s:%(lineno)d) - %(message)s" +) +json_format = ( + '{"level": "%(levelname)s", "time": "%(asctime)s",' + '"module name": "%(module)s", "function name": "%(funcName)s", "line number": %(lineno)d,' + '"message": "%(message)s"},' +) + + +class LoggerLevel(Enum): + CRITICAL = logging.CRITICAL + ERROR = logging.ERROR + WARNING = logging.WARNING + INFO = logging.INFO + DEBUG = logging.DEBUG + NOTSET = logging.NOTSET + + def __repr__(self): + return self.name + + +class BasicLogger(logging.Logger): + """ + The basic logger class that should be used. Upon setup, this is provided to the + built-in logging by calling ``logging.setLoggerClass``. This way, every new logger + created will be of this class. This allows us to define custom fields and functions + that we want to use with our loggers. This class should not be instantiated + separately, only call ``logging.getLogger("qat.purr.some_name")``, and this will return + an instance of :class:`BasicLogger`. 
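+
+    Illustrative usage (any logger name works the same way)::
+
+        log = logging.getLogger("qat.purr.my_module")
+        assert isinstance(log, BasicLogger)
+        log.info("Created as a BasicLogger thanks to setLoggerClass.")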
+ """ + + def __init__(self, name: str, _log_folder: "LogFolder" = None): + logging.Logger.__init__(self, name) + self.setLevel(logging.INFO) + self.log_folder = _log_folder or LogFolder() + + @property + def logs_path(self): + return self.log_folder.folder_path + + def close(self): + """Closes this logger, cleans up the file handles and appropriate folder.""" + for handler in self.handlers: + try: + handler.close() + except Exception as e: + print(f"Logger handler failed to close cleanly. Message: {e}") + + if self.log_folder is not None: + try: + self.log_folder.close() + except Exception as e: + print(f"Log folder failed to close cleanly. Message: {e}") + + _dummy_log = logging.makeLogRecord({}) + record_override_key = "$_enable_record_override" + + def makeRecord( + self, + name, + level, + fn, + lno, + msg, + args, + exc_info, + func=None, + extra: dict = None, + sinfo=None, + ): + """ + Override that allows us to override record values via the extras dictionary. + Initially built to allow for printing out messages that look like they come from + different places in the source code than where the logger was called. + """ + if extra is None: + extra = {} + + if extra.pop(self.record_override_key, False): + # Strip off values that would conflict with the makeRecord validation then + # just apply them afterwards. + overwriting_values = { + key: value + for key, value in extra.items() + if key in self._dummy_log.__dict__ + } + for key in overwriting_values: + del extra[key] + + # If we have overrides for the function values, just apply them. + if "fn" in extra: + fn = extra.pop("fn") + + if "lno" in extra: + lno = extra.pop("lno") + + if "func" in extra: + func = extra.pop("func") + else: + overwriting_values = {} + + record = super().makeRecord( + name, level, fn, lno, msg, args, exc_info, func, extra, sinfo + ) + for key, value in overwriting_values.items(): + record.__dict__[key] = value + + return record + + def __enter__(self): + pass + + def __exit__(self, exc, value, tb): + self.close() + + def __del__(self): + self.close() + + +class ConsoleLoggerHandler(logging.StreamHandler): + """ + Basic console handler for the logger. It defaults to stdout. + """ + + def __init__(self, stream: IO = sys.stdout): + super().__init__(stream) + self.setFormatter(logging.Formatter(default_logger_format)) + + def __repr__(self): + return "Console logger handler" + + +class FileLoggerHandler(logging.FileHandler): + """ + Basic file handler for the logger. A file path must be provided. The log file is + created with a delay, so the stream is None until the first emit. This also allows + to write some initial stuff to the log file when creating it. + """ + + def __init__(self, file_path: str): + super().__init__(os.path.abspath(file_path), mode="w", delay=True) + self.setFormatter(logging.Formatter(default_logger_format)) + + def emit(self, record): + if self.stream is None: + os.makedirs(os.path.dirname(self.baseFilename), exist_ok=True) + self.stream = self._open() + self.create_initial_file() + logging.FileHandler.emit(self, record) + + def create_initial_file(self): + """ + Implement this method in the derived class to insert some initial text in the + log file. Use emit and flush while writing directly to the stream. + """ + pass + + def __repr__(self): + return "File logger handler (path: %s)" % self.baseFilename + + +class CompositeLogger(BasicLogger): + """ + The default logger class of PuRR. 
It is intended to store all the loggers in a list, + and when logging, the functions from here should be called, which iterate through + the list and apply the logging to each logger separately. This way, only one + function needs to be called when logging, and it is ensured, that all the enabled + loggers will log the message. + """ + + def __init__( + self, + loggers_or_names: List[Union[str, logging.Logger]] = None, + _log_folder=None, + ): + """Creates the list of loggers on which the logging functions will iterate + + :param logger_names: List of loggers by their names + (e.g. ``["qat.purr.json", "qat.purr.file"]``) or actual logger instances. + """ + super().__init__("default", _log_folder) + + self.loggers = [] + if loggers_or_names is None: + loggers_or_names = [] + + # Add root to our composite so consumers can also hook into our logs. + root = logging.getLogger() + loggers_or_names.append(root) + self.add_loggers(loggers_or_names) + + # Set the root to the lowest non-custom log level activated. + root.setLevel( + min([val.level for val in self.loggers if (float(val.level / 10)).is_integer()]) + ) + + def add_loggers(self, loggers_or_names: List[Union[str, logging.Logger]] = ()): + if loggers_or_names is not None: + for val in loggers_or_names: + if isinstance(val, str): + self.loggers.append(logging.getLogger(val)) + elif isinstance(val, logging.Logger): + self.loggers.append(val) + + def _add_stack_levels(self, kwargs): + """ + Due to the way the loggers work, we need to go back up the stack a few calls to + get the real caller. + """ + kwargs["stacklevel"] = kwargs.get("stacklevel", 0) + 2 + return kwargs + + def info(self, msg: str, *args, **kwargs): + kwargs = self._add_stack_levels(kwargs) + for logger in self.loggers: + logger.info(msg, *args, **kwargs) + + def debug(self, msg: str, *args, **kwargs): + kwargs = self._add_stack_levels(kwargs) + for logger in self.loggers: + logger.debug(msg, *args, **kwargs) + + def error(self, msg: str, *args, **kwargs): + kwargs = self._add_stack_levels(kwargs) + for logger in self.loggers: + logger.error(msg, *args, **kwargs) + + def warning(self, msg: str, *args, **kwargs): + kwargs = self._add_stack_levels(kwargs) + for logger in self.loggers: + logger.warning(msg, *args, **kwargs) + + def critical(self, msg: str, *args, **kwargs): + kwargs = self._add_stack_levels(kwargs) + for logger in self.loggers: + logger.critical(msg, *args, **kwargs) + + def log(self, level, msg: str, *args, **kwargs): + kwargs = self._add_stack_levels(kwargs) + if isinstance(level, LoggerLevel): + level = level.value + for logger in self.loggers: + logger.log(level, msg, *args, **kwargs) + + def exception(self, msg: str, *args, **kwargs): + kwargs = self._add_stack_levels(kwargs) + for logger in self.loggers: + logger.exception(msg, *args, **kwargs) + + def close(self): + super(CompositeLogger, self).close() + for logger in self.loggers: + if isinstance(logger, BasicLogger): + logger.close() + + +class LogFolder: + """ + It is the main log folder in which all the log files are saved. It can be configured + multiple ways, like the base folder path, which can be a specified path of the disk + or ``None`` to save the logs in the system temporary folder. The + :paramref:`labber_style` specifies whether to create a Labber-style folder hierarchy + for the logs or not. 
If not, depending on the :paramref:`folder_name` parameter, it + will either create random folders for each run, or if :paramref:`folder_name` is not + ``None``, it will create a sub-folder specified by :paramref:`folder_name`. Also, a + :paramref:`prefix` and :paramref:`suffix` can be specified to append the created log + folder (if :paramref:`labber_style` is not True). + """ + + def __init__( + self, + base_folder_path: str = None, + labber_style: bool = None, + cleanup: bool = None, + folder_name: str = None, + prefix: str = None, + suffix: str = None, + ): + """ + The constructor for the LogFolder. It can be configured by the parameters. If + the parameters are not provided, the default variables are used from the front + of the file (which can be also set by importing a configuration). + + :param base_folder_path: Specifies the base directory, where the new log folder + will be created. If it is ``None``, then it is set to the default value + ``default_logger_base_directory``, which is set by the imported + configuration file, otherwise it is defined at the top of this module. If + the default value is ``None``, the log folder will be created in the + system's TMP folder. + :param labber_style: If it is true, it will create a labber hierarchy log + folder. + :param cleanup: If it is true, it will remove the log folder together with the + logs at the end of execution. + :param folder_name: If :paramref:`labber_style` is false, then + :paramref:`folder_name` will be the name of the new log folder instead of + generating a random one. + :param prefix: It appends to the front of the generated folder name + :param suffix: It appends to the end of the generated folder name + """ + self.starting_date = date.today() + self.needs_cleanup: bool = cleanup + + if base_folder_path is None: + base_folder_path = "logs" + + os.makedirs(os.path.abspath(base_folder_path), exist_ok=True) + + folder_name = folder_name or "" + if prefix is not None: + folder_name = f"{prefix}_{folder_name}" + if suffix is not None: + folder_name = f"{folder_name}_{suffix}" + if labber_style: + self.folder_path = self.create_sub_folder_labber_style( + base_folder_path, folder_name=folder_name + ) + else: + self.folder_path = os.path.join(base_folder_path, folder_name) + os.makedirs(self.folder_path, exist_ok=True) + + self.folder_path = os.path.abspath(self.folder_path) + + def get_log_file_path(self, file_name: str = "log", over_write=True): + file_path = os.path.join(self.folder_path, file_name) + if not over_write: + if os.path.exists(file_path): + raise OSError("File already exists!") + return file_path + + def create_sub_folder_labber_style(self, base_folder, folder_name: str = None): + if folder_name is None or folder_name == "": + folder_name = "" + else: + folder_name = folder_name + "." 
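+        # e.g. a folder_name of "sweep" at 14:05:09 becomes "sweep.14.05.09"; an
+        # empty folder_name yields "14.05.09", and collisions append "_2", "_3", ...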
+ now = datetime.now() + folder_name = folder_name + f"{now.hour:02d}.{now.minute:02d}.{now.second:02d}" + main_folder_path = self.get_main_folder_path_labber_style(base_folder) + original_name = folder_name + i = 2 + while os.path.exists(os.path.join(main_folder_path, folder_name)): + folder_name = f"{original_name}_{i}" + i += 1 + final_sub_folder_path = os.path.join(main_folder_path, folder_name) + os.makedirs(final_sub_folder_path, exist_ok=True) + return final_sub_folder_path + + @staticmethod + def get_main_folder_path_labber_style(base_folder): + now = datetime.now() + year, month, day = now.year, now.month, now.day + main_folder_path = os.path.join( + base_folder, f"{year:04d}", f"{month:02d}", f"Data_{month:02d}{day:02d}" + ) + return main_folder_path + + def close(self): + if self.folder_path is not None and os.path.isdir(self.folder_path): + if self.needs_cleanup: + shutil.rmtree(self.folder_path) + else: + try: + os.removedirs(self.folder_path) + except OSError: + pass + + def __repr__(self): + return self.folder_path + + def __del__(self): + self.close() + + +class KeywordFilter(logging.Filter): + """ + A customized keyword filter that can be added to a log handler or a logger. Filters + all the log messages, and if the message content contains the keyword, the log will + not be printed. + """ + + def __init__(self, keyword=""): + super().__init__(keyword) + self.keyword = keyword + + def filter(self, record: logging.LogRecord): + if self.keyword in record.msg: + return False + return True + + +class ModuleFilter(logging.Filter): + """ + A customized module filter that can be added to a log handler or a logger. Filters + all the log messages, and if the log was produced by a module with the specified + module name, the log will not pass. + """ + + def __init__(self, module_name=""): + super().__init__(module_name) + self.module_name = module_name + + def filter(self, record: logging.LogRecord): + if self.module_name in record.module: + return False + return True + + +class LevelFilter(logging.Filter): + """ + Filter out the debug messages from the Jupyter logs. This is needed because the + specialized logging functions, like code or output have smaller level than the + DEBUG logging level (so that other than Jupyter handlers don't process them). + """ + + def __init__(self, level): + super().__init__() + self.level = logging.getLevelName(level) + + def filter(self, record: logging.LogRecord): + if isinstance(self.level, int) and record.levelno == self.level: + return False + return True + + +logging.setLoggerClass(BasicLogger) + + +def import_logger_configuration(logger_config: dict, log_folder: LogFolder = None): + """ + It imports the configuration of the loggers from a JSON data structure. This must be + in the format described by `logging.config + `_ built-in module. + + It can also contain some additional settings: + + - :default_logger_directory: This is where a new log folder will be created for each + execution. If it is set to None the system's temp folder is used. + - :default_logger_cleanup: Specifies whether the log folders should be removed after + execution or not. + - :default_logger_labber_style: If this is true, it will create a log folder + hierarchy in labber style at the specified **default_logger_directory** + + The logger list in the config file may also contain some additional settings: + + - :class: If this is specified, then the logger is of a custom class, not included + in the built-in logging package. 
This mirrors how custom handlers are declared with the '()' key.
+    - :active: If this is false, then the corresponding logger will not be imported.
+      This is an easier way to exclude a logger than deleting it from the config
+      file: if the logger is needed again later, nothing has to be rewritten, just
+      flip **active** from false to true.
+
+    The configuration may also contain the starting log folder settings
+    (:paramref:`log_folder`). Each time the logging configuration is imported, the
+    log folder will be set up as it is specified. If this is not provided in the
+    JSON structure, then the created log folder will use the default settings
+    (which can also be specified in the configuration, as described above).
+
+
+    :param logger_config: The JSON data structure from the logger_settings.json
+        configuration
+    :param log_folder: The log folder to be used instead of the one specified in the
+        configuration file
+    :return: A CompositeLogger wrapping the imported loggers, already loaded and
+        configured.
+    """
+    if log_folder is None:
+        if "log_folder" in logger_config:
+            log_folder = LogFolder(**logger_config["log_folder"])
+        else:
+            log_folder = LogFolder()
+
+    if "external_loggers" in logger_config:
+        logger_config["loggers"].update(logger_config["external_loggers"])
+
+    non_active_loggers = []
+    for key, value in logger_config["loggers"].items():
+        if "active" in value and value["active"] == 0:
+            non_active_loggers.append(key)
+
+    for key, value in logger_config["handlers"].items():
+        # If we have a path append the current log folder onto it.
+        file_path = value.get("file_path", None)
+        if file_path is not None:
+            value["file_path"] = os.path.join(log_folder.folder_path, file_path)
+
+    for key in non_active_loggers:
+        logger_config["loggers"].pop(key)
+
+    dictConfig(logger_config)
+    log_keys = set(logger_config["loggers"].keys())
+    if "external_loggers" in logger_config:
+        log_keys.difference_update(set(logger_config["external_loggers"].keys()))
+
+    return CompositeLogger(list(log_keys), log_folder)
+
+
+def get_logger_config(config_file=None, log_folder: LogFolder = None):
+    """
+    Imports the logger configuration from the provided JSON file. If none is
+    provided, the current directory is searched for a logger_settings.json
+    configuration file; failing that, the default logger_settings.json shipped
+    next to this module is used.
+
+    :param config_file: The path to the JSON file on the disk containing the logger
+        configuration
+    :param log_folder: The log folder to be used instead of the one specified in the
+        configuration file
+    :return: A CompositeLogger instance configured with the names of the imported
+        loggers
+    """
+
+    if config_file is None:
+        config_file = "logger_settings.json"
+
+    potential_file = config_file
+    if not os.path.isfile(potential_file):
+        potential_file = os.path.join(os.getcwd(), config_file)
+    if not os.path.isfile(potential_file):
+        potential_file = os.path.join(os.path.dirname(__file__), config_file)
+
+    if not os.path.isfile(potential_file):
+        print(
+            f"Log config file {config_file} doesn't exist and can't be found using "
+            "default search patterns. Loading default configuration."
+        )
+        potential_file = os.path.join(os.path.dirname(__file__), "logger_settings.json")
+
+    with open(potential_file, "r") as f:
+        logger_config = json.load(f)
+        return import_logger_configuration(logger_config, log_folder)
+
+
+@atexit.register
+def close_logger():
+    """
+    This method is executed upon exit, and it closes all the file handlers from the
+    default loggers.
+
+    This allows the log folder to be removed once execution has finished. Without it
+    the handlers would only be closed after everything else, after the log folder
+    has already been removed.
+    """
+    def_logger = get_default_logger()
+    if def_logger is not None:
+        def_logger.close()
+
+
+_default_logging_instance = None
+
+
+def get_default_logger():
+    """
+    Initializes the global logger or fetches one if it already exists.
+    """
+    global _default_logging_instance
+    if _default_logging_instance is None:
+        _default_logging_instance = get_logger_config()
+
+        # Do a very hacky check about whether we're in a testing environment and force
+        # clean-up if we are.
+        try:
+            import traceback
+
+            stack = traceback.extract_stack()
+            is_test_env = any(
+                val.filename is not None
+                # Normalize separators so the check also works on non-Windows hosts.
+                and val.filename.replace("\\", "/").endswith("unittest/loader.py")
+                for val in stack
+            )
+            if is_test_env:
+                _default_logging_instance.log_folder.needs_cleanup = True
+        except Exception:
+            _default_logging_instance.warning("Test environment detection failed.")
+
+    return _default_logging_instance
diff --git a/src/munchkin/pykin/pykin/runtime.py b/src/munchkin/pykin/pykin/runtime.py
new file mode 100644
index 0000000..536bf58
--- /dev/null
+++ b/src/munchkin/pykin/pykin/runtime.py
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2024 Oxford Quantum Circuits Ltd
+
+from os import remove
+from os.path import dirname, exists, join
+from tempfile import NamedTemporaryFile
+from typing import Any, List
+
+from pykin import initialize_logger, BuilderAdaptor, RuntimeAdaptor, Executor, DEFAULT_LOG_FILE
+
+dev_directory = join(dirname(__file__), "..", "..", "..", "munchkin")
+
+# Enable file logging if we're in a development environment.
+if exists(dev_directory):
+    initialize_logger(join(dev_directory, DEFAULT_LOG_FILE))
+
+
+class MunchkinRuntime:
+    """
+    Wrapper API for native functionality, purely in Python to help do mappings.
+    """
+
+    def __init__(self, builder: BuilderAdaptor, runtime: RuntimeAdaptor):
+        self.builder: BuilderAdaptor = builder
+        self.runtime: RuntimeAdaptor = runtime
+        self.executor = Executor()
+
+    def trace_graphs(self):
+        self.executor.trace_graphs()
+        return self
+
+    def trace_projections(self):
+        self.executor.trace_projections()
+        return self
+
+    def trace_runtime(self):
+        self.executor.trace_runtime()
+        return self
+
+    def run_ll(self, ll_string: str, args: List[Any] = None):
+        """Runs a .ll string. Creates a temporary file and writes to it."""
+        with NamedTemporaryFile(suffix=".ll", delete=False) as fp:
+            # The file is opened in binary mode, so encode the IR string first.
+            fp.write(ll_string.encode())
+            fp.close()
+        try:
+            return self.run(fp.name, args)
+        finally:
+            remove(fp.name)
+
+    def run_bitcode(self, bitcode: bytes, args: List[Any] = None):
+        """Runs LLVM bitcode when passed as bytes.
Creates a temporary file and writes to it."""
+        with NamedTemporaryFile(suffix=".bc", delete=False) as fp:
+            fp.write(bitcode)
+            fp.close()
+        try:
+            return self.run(fp.name, args)
+        finally:
+            remove(fp.name)
+
+    def run(self, file_path: str, args: List[Any] = None):
+        results = self.executor.run_with_args(
+            file_path, args or [], self.builder, self.runtime
+        )
+        return results
diff --git a/src/munchkin/pykin/pykin/simulators.py b/src/munchkin/pykin/pykin/simulators.py
new file mode 100644
index 0000000..a29cc40
--- /dev/null
+++ b/src/munchkin/pykin/pykin/simulators.py
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2024 Oxford Quantum Circuits Ltd
+
+from typing import Dict
+
+from pykin import BuilderAdaptor, RuntimeAdaptor
+from qiskit.providers.models import QasmBackendConfiguration
+
+from qiskit import QiskitError, QuantumCircuit, transpile
+from qiskit_aer import AerSimulator
+
+from pykin.runtime import MunchkinRuntime
+
+
+def fetch_qasm_runtime(qubit_count=30):
+    return MunchkinRuntime(QASMBuilder(qubit_count), QASMRuntime())
+
+
+class QASMBuilder(BuilderAdaptor):
+    def __init__(self, qubit_count: int):
+        super().__init__()
+        self.circuit = QuantumCircuit(qubit_count, qubit_count)
+        self.shot_count = 1024
+        self.bit_count = 0
+
+    def cx(self, controls, target, theta):
+        self.circuit.crx(theta, controls, target)
+
+    def cz(self, controls, target, theta):
+        # crz, not crx: each controlled gate maps to a rotation about its own axis.
+        self.circuit.crz(theta, controls, target)
+
+    def cy(self, controls, target, theta):
+        self.circuit.cry(theta, controls, target)
+
+    def x(self, qubit, theta):
+        self.circuit.rx(theta, qubit)
+
+    def y(self, qubit, theta):
+        self.circuit.ry(theta, qubit)
+
+    def z(self, qubit, theta):
+        self.circuit.rz(theta, qubit)
+
+    def swap(self, qubit1, qubit2):
+        self.circuit.swap(qubit1, qubit2)
+        return self
+
+    def reset(self, qubit):
+        self.circuit.reset(qubit)
+
+    def measure(self, qubit):
+        self.circuit.measure(qubit, self.bit_count)
+        self.bit_count = self.bit_count + 1
+        return self
+
+    def clear(self):
+        self.circuit.clear()
+        self.bit_count = 0
+
+
+class QASMRuntime(RuntimeAdaptor):
+    def execute(self, builder: QASMBuilder) -> Dict[str, int]:
+        aer_config = QasmBackendConfiguration.from_dict(AerSimulator._DEFAULT_CONFIGURATION)
+        aer_config.n_qubits = builder.circuit.num_qubits
+        qasm_sim = AerSimulator(aer_config)
+
+        circuit = builder.circuit
+        # TODO: Needs a more nuanced try/catch. Some exceptions we should catch, others we should re-throw.
+        try:
+            job = qasm_sim.run(transpile(circuit, qasm_sim), shots=builder.shot_count)
+            results = job.result()
+            distribution = results.get_counts()  # get_counts() previously took the circuit; check this still holds.
+        except QiskitError as e:
+            raise ValueError(f"Error while attempting to build/run circuit: {str(e)}") from e
+
+        removals = builder.circuit.num_qubits - builder.bit_count
+
+        # Because qiskit needs all values up-front we just provide a maximal classical register then trim off
+ return {key[removals:]: value for key, value in distribution.items()} diff --git a/src/munchkin/pykin/pykin/utils.py b/src/munchkin/pykin/pykin/utils.py new file mode 100644 index 0000000..ccc3bf1 --- /dev/null +++ b/src/munchkin/pykin/pykin/utils.py @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: BSD-3-Clause +# Copyright (c) 2024 Oxford Quantum Circuits Ltd + +from typing import Optional + +from ._native import initialize_file_logger, initialize_commandline_logger + + +def initialize_logger(file_path: Optional[str] = None): + if file_path is None: + initialize_commandline_logger() + else: + initialize_file_logger(file_path) diff --git a/src/munchkin/pykin/pyproject.toml b/src/munchkin/pykin/pyproject.toml new file mode 100644 index 0000000..db1d044 --- /dev/null +++ b/src/munchkin/pykin/pyproject.toml @@ -0,0 +1,30 @@ +[project] +name = "pykin" +version = "0.1.0" +requires-python = ">=3.9" +dependencies = [ + "pytest", + "qiskit==0.45.*", + "qiskit-optimization==0.4.*", + "qiskit-ignis==0.7.*", + "qiskit-aer==0.13.*", +] + +#classifiers = [ +# "License :: OSI Approved :: MIT License", +# "Development Status :: 3 - Alpha", +# "Intended Audience :: Developers", +# "Programming Language :: Python :: 3.7", +# "Programming Language :: Python :: 3.8", +# "Programming Language :: Python :: 3.9", +# "Programming Language :: Python :: 3.10", +# "Programming Language :: Python", +# "Programming Language :: Rust", +# "Operating System :: MacOS", +# "Operating System :: Microsoft :: Windows", +# "Operating System :: POSIX :: Linux", +#] + +[build-system] +requires = ["maturin ~= 0.13.7"] +build-backend = "maturin" diff --git a/src/munchkin/pykin/src/analysis.rs b/src/munchkin/pykin/src/analysis.rs new file mode 100644 index 0000000..d5b9b21 --- /dev/null +++ b/src/munchkin/pykin/src/analysis.rs @@ -0,0 +1,944 @@ +use std::borrow::Borrow; +use std::cmp::Ordering; +use std::collections::{HashMap, HashSet}; +use std::fmt::{Display, Formatter}; +use std::iter::zip; +use std::ops::{Deref}; +use log::{Level, log}; +use num::traits::FloatConst; +use crate::builders::{Builder, InstructionBuilder}; +use crate::execution::EngineCollection; +use crate::hardware::Qubit; +use crate::runtime::{ActiveTracers, TracingModule}; +use crate::smart_pointers::{Ptr}; +use crate::{with_mutable, with_mutable_self}; + +#[derive(Clone)] +pub struct StateHistory { + index: i64, + metadata: Ptr, + + // TODO: Pointer to avoid mutability. + timeline: Ptr> +} + +macro_rules! 
cluster_or_state { + ($self:ident, $axis:ident, $arg:ident) => { + match $self.state_of() { + StateElement::Single(qstate) => { + let counter = &with_mutable_self!($self.metadata.next_counter()); + let mut next_state = qstate.clone_with_counter(counter); + next_state.$axis($arg); + with_mutable_self!($self.timeline.insert(counter.clone(), StateElement::Single(next_state))); + } + StateElement::Cluster(qcluster) => { + qcluster.$axis($arg, &$self.index); + } + } + }; + ($self:ident, $method:ident) => { + match $self.state_of() { + StateElement::Single(qstate) => { + let counter = &with_mutable_self!($self.metadata.next_counter()); + let mut next_state = qstate.clone_with_counter(counter); + next_state.$method(); + with_mutable_self!($self.timeline.insert(counter.clone(), StateElement::Single(next_state))); + } + StateElement::Cluster(qcluster) => { + qcluster.$method(&$self.index); + } + } + }; +} + +impl StateHistory { + pub fn new(meta: &Ptr, index: &i64) -> StateHistory { + StateHistory { timeline: Ptr::from(HashMap::new()), metadata: meta.clone(), index: index.clone() } + } + + /// Direct manipulation to the timeline. Means all existing rotational history will be lost. + pub fn add(&self, counter: i64, element: StateElement) { + with_mutable_self!(self.timeline.insert(counter, element)); + } + + pub fn X(&self, radii: i64) { + cluster_or_state!(self, X, radii); + } + + pub fn Y(&self, radii: i64) { + cluster_or_state!(self, Y, radii); + } + + pub fn Z(&self, radii: i64) { + cluster_or_state!(self, Z, radii); + } + + pub fn measure(&self) { + cluster_or_state!(self, measure); + } + + pub fn reset(&self) { + self.measure(); + + // We measure first to collapse any state then just reset our timeline to 0. + let counter = with_mutable_self!(self.metadata.next_counter()); + self.add(counter.clone(), StateElement::Single(SingleState::new(&counter, SpherePoint::new(), &self.index))); + } + + fn controlled_rotation(&self, sphere: SpherePoint, conditioned_on: &Vec, result: i8) { + let current_counter = with_mutable_self!(self.metadata.next_counter()); + let cluster = self.form_cluster(current_counter.borrow(), conditioned_on); + with_mutable!(cluster.entangle(ClusterRelationship::new(sphere, current_counter, self.index, conditioned_on, result))); + } + + pub fn CX(&self, radii: i64, conditioned_on: &Vec, result: i8) { + let mut sphere = SpherePoint::new(); + sphere.X(radii); + self.controlled_rotation(sphere, conditioned_on, result); + } + + pub fn CY(&self, radii: i64, conditioned_on: &Vec, result: i8) { + let mut sphere = SpherePoint::new(); + sphere.X(radii); + self.controlled_rotation(sphere, conditioned_on, result); + } + + pub fn CZ(&self, radii: i64, conditioned_on: &Vec, result: i8) { + let mut sphere = SpherePoint::new(); + sphere.Z(radii); + self.controlled_rotation(sphere, conditioned_on, result); + } + + /// Adds a cluster to this state, forming an entangled cluster. + fn add_cluster(&self, counter: &i64, cluster: &Ptr) { + with_mutable_self!(self.timeline.insert(counter.clone(), StateElement::Cluster(cluster.clone()))); + } + + /// Forms a cluster group with the states at the passed-in index. + fn form_cluster(&self, counter: &i64, targets: &Vec) -> Ptr { + if let StateElement::Cluster(cluster) = self.state_of() { + if cluster.spans() == targets.iter().map(|val| val.clone()).collect::>() { + return cluster.clone(); + } + } + + // If any of our targets are already clusters then we expand over those clusters as well. 
+ let mut target_indexes = targets.iter().map(|val| val.clone()).collect::>(); + for target in targets { + let state = with_mutable_self!(self.metadata.root.get_history(target)); + if let StateElement::Cluster(cluster) = state.state_of() { + for id in cluster.spans() { + target_indexes.insert(id); + } + } + } + + // Finally build a super-cluster that spans every qubit. + let cluster = Ptr::from(ClusterState::new(&self.metadata)); + for target in target_indexes { + let state = with_mutable_self!(self.metadata.root.get_history(&target)); + state.add_cluster(counter, &cluster); + } + + self.add_cluster(counter, &cluster); + cluster.clone() + } + + pub fn state_of(&self) -> &StateElement { + // To make things simpler, if we attempt to get a state on an empty collection, just + // insert a zero-rotation at the beginning. + // + // This also holds because when you entangle something it because something else, so + // seeing it as a continuation of an existing rotation isn't precisely true. + if self.timeline.is_empty() { + self.X(0); + } + + self.timeline.values().last().unwrap() + } +} + +#[derive(Clone)] +pub enum StateElement { + Single(SingleState), + Cluster(Ptr) +} + +#[derive(Clone)] +pub struct SingleState { + counter: i64, + state: SpherePoint, + + /// Has this state been collapsed into a classical value? + collapsed: bool, + index: i64 +} + +impl SingleState { + pub fn new(counter: &i64, + state: SpherePoint, + index: &i64) -> SingleState { + SingleState {counter: counter.clone(), state, collapsed: false, index: index.clone()} + } + + /// States are commonly cloned with a different counter to perform further rotations on. + pub fn clone_with_counter(&self, counter: &i64) -> SingleState { + SingleState::new(counter, self.state.clone(), &self.index) + } + + pub fn X(&mut self, radii: i64) { + self.state.X(radii) + } + + pub fn Y(&mut self, radii: i64) { + self.state.Y(radii) + } + + pub fn Z(&mut self, radii: i64) { + self.state.Z(radii) + } + + /// Sets that this is a measure point with no modifications. + pub fn measure(&mut self) { + self.collapsed = true; + } +} + +#[derive(Clone)] +pub struct ClusterState { + clustered_state: QuantumState, + entanglement: Vec, + + /// History of collapsed states. Key is counter, results are target qubit and its exact history. + // TODO: Pointer to avoid mutability. + collapse_history: Ptr>, + metadata: Ptr +} + +impl ClusterState { + pub fn new(meta: &Ptr) -> ClusterState { + ClusterState { + clustered_state: QuantumState::new(meta), + entanglement: Vec::new(), + collapse_history: Ptr::from(HashMap::new()), + metadata: meta.clone() + } + } + + pub fn measure(&self, target: &i64) { + let cstate = &self.clustered_state; + self.clustered_state.measure(target); + + let graph = &cstate.state_graph; + let entry = with_mutable!(graph.remove(target).unwrap()); + with_mutable_self!(self.collapse_history.insert(self.metadata.counter.clone(), (target.clone(), entry))); + } + + pub fn X(&self, radii: i64, index: &i64) { + self.clustered_state.X(radii, index); + } + + pub fn Y(&self, radii: i64, index: &i64) { + self.clustered_state.Y(radii, index); + } + + pub fn Z(&self, radii: i64, index: &i64) { + self.clustered_state.Z(radii, index); + } + + pub fn entangle(&mut self, rel: ClusterRelationship) { + self.entanglement.push(rel); + } + + pub fn spans(&self) -> HashSet { + self.clustered_state.state_graph.keys().map(|val| val.clone()).collect::>() + } +} + +/// TODO: Swap to more matrix-y representation now. 
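+/// Amplitude and phase are stored as whole degrees, so rotations wrap modulo
+/// 360: e.g. two successive `X(180)` calls return the point to amplitude 0.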
+#[derive(Clone)]
+pub struct SpherePoint {
+  amplitude: i64,
+  phase: i64,
+}
+
+impl SpherePoint {
+  pub fn new() -> SpherePoint {
+    SpherePoint { amplitude: 0, phase: 0 }
+  }
+
+  pub fn with_X(radii: i64) -> SpherePoint {
+    let mut sp = SpherePoint::new();
+    sp.X(radii);
+    sp
+  }
+
+  pub fn with_Y(radii: i64) -> SpherePoint {
+    let mut sp = SpherePoint::new();
+    sp.Y(radii);
+    sp
+  }
+
+  pub fn with_Z(radii: i64) -> SpherePoint {
+    let mut sp = SpherePoint::new();
+    sp.Z(radii);
+    sp
+  }
+
+  pub fn X(&mut self, radii: i64) {
+    self.amplitude = (self.amplitude + radii) % 360
+  }
+
+  pub fn Y(&mut self, radii: i64) {
+    self.phase = (self.phase + radii) % 360
+  }
+
+  // TODO: wrong, fix later.
+  pub fn Z(&mut self, radii: i64) {
+    let ratio = radii % 360;
+
+    if radii == 0 {
+      return;
+    }
+
+    // Short-circuit on rotation poles.
+    if (self.amplitude == 90 || self.amplitude == 270) && (self.phase == 0 || self.phase == 180) {
+      return;
+    }
+
+    let phase = self.phase;
+    let amp = self.amplitude;
+
+    if radii == 90 {
+      self.phase = amp;
+      self.amplitude = phase;
+    } else if radii == 180 {
+      self.phase = -amp % 360;
+      self.amplitude = -phase % 360;
+    } else if radii == 270 {
+      self.phase = -phase % 360;
+      self.amplitude = -amp % 360;
+    } else {
+      panic!("Irregular rotation added to prediction algorithm. Unsupported right now.")
+    }
+  }
+}
+
+impl Default for SpherePoint {
+  fn default() -> Self {
+    SpherePoint::new()
+  }
+}
+
+#[derive(Clone)]
+pub struct ClusterRelationship {
+  rotation: SpherePoint,
+  at_counter: i64,
+  target: i64,
+  conditioned_on: Vec<i64>,
+  on_value: i8
+}
+
+impl ClusterRelationship {
+  pub fn new(rotation: SpherePoint, at_counter: i64, target: i64, conditioned_on: &Vec<i64>, on_value: i8) -> ClusterRelationship {
+    ClusterRelationship { rotation, at_counter, target, conditioned_on: conditioned_on.clone(), on_value }
+  }
+}
+
+/// Collection representing a quantum state with qubits identified by index.
+#[derive(Clone)]
+pub struct QuantumState {
+  metadata: Ptr<Metadata>,
+
+  /// Key = index, Value = state history.
+  state_graph: Ptr<HashMap<i64, StateHistory>>
+}
+
+impl QuantumState {
+  pub fn new(meta: &Ptr<Metadata>) -> QuantumState {
+    let collection = QuantumState { state_graph: Ptr::from(HashMap::default()), metadata: meta.clone() };
+
+    // If we're the root collection in the hierarchy just mark us as such.
+ if Ptr::is_null(&meta.root) { + with_mutable!(meta.root = Ptr::from(collection.borrow())); + } + collection + } + + pub fn get_history(&self, index: &i64) -> &mut StateHistory { + if let Some(qt) = with_mutable_self!(self.state_graph.get_mut(index)) { + qt + } else { + let timeline = StateHistory::new(&self.metadata, index); + with_mutable_self!(self.state_graph.insert(index.clone(), timeline)); + with_mutable_self!(self.state_graph.get_mut(index).unwrap()) + } + } + + pub fn X(&self, radii: i64, target: &i64) { + let qt = self.get_history(target); + qt.X(radii); + } + + pub fn Y(&self, radii: i64, target: &i64) { + let qt = self.get_history(target); + qt.Y(radii); + } + + pub fn Z(&self, radii: i64, target: &i64) { + let qt = self.get_history(target); + qt.Z(radii); + } + + pub fn CX(&self, radii: i64, target: &i64, conditioned_on: &Vec, result: i8) { + let qt = self.get_history(target); + qt.CX(radii, conditioned_on, result); + } + + pub fn CY(&self, radii: i64, target: &i64, conditioned_on: &Vec, result: i8) { + let qt = self.get_history(target); + qt.CY(radii, conditioned_on, result); + } + + pub fn CZ(&self, radii: i64, target: &i64, conditioned_on: &Vec, result: i8) { + let qt = self.get_history(target); + qt.CZ(radii, conditioned_on, result); + } + + pub fn swap(&self, first: &i64, second: &i64) { + let left_history = self.get_history(first); + let right_history = self.get_history(second); + + let left_state = left_history.state_of(); + let right_state = right_history.state_of(); + + let op_counter = with_mutable_self!(self.metadata.next_counter()); + match left_state { + StateElement::Single(single) => { + right_history.add(op_counter.clone(), StateElement::Single(single.clone_with_counter(&op_counter))); + } + StateElement::Cluster(cluster) => { + right_history.add(op_counter.clone(), StateElement::Cluster(cluster.clone())); + } + } + + match right_state { + StateElement::Single(single) => { + left_history.add(op_counter.clone(), StateElement::Single(single.clone_with_counter(&op_counter))); + } + StateElement::Cluster(cluster) => { + left_history.add(op_counter.clone(), StateElement::Cluster(cluster.clone())); + } + } + } + + pub fn measure(&self, target: &i64) { + let state = self.get_history(target); + state.measure(); + } + + pub fn reset(&self, target: &i64) { + let state = self.get_history(target); + state.reset(); + } +} + +pub struct Metadata { + /// Current program-counter we're on. + counter: i64, + + /// Root collection in our hierarchy. Can be used for top-level searches and queries. + root: Ptr +} + +impl Metadata { + pub fn new() -> Metadata { + Metadata { counter: 0, root: Ptr::default() } + } + + pub fn next_counter(&mut self) -> i64 { + self.counter = self.counter + 1; + return self.counter + } +} + +/// Transform radians into degrees for easy debugging for now. +/// TODO: Likely change form later. 
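+/// Illustrative: a half-turn (PI radians) maps to 180 degrees; the `as i64`
+/// cast truncates any floating-point residue toward zero.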
+fn conv(radians: &f64) -> i64 {
+  (radians * 180.0/f64::PI()) as i64
+}
+
+pub struct QuantumStatePredictor {
+  state: QuantumState
+}
+
+impl QuantumStatePredictor {
+  pub fn new() -> QuantumStatePredictor {
+    QuantumStatePredictor { state: QuantumState::new(&Ptr::from(Metadata::new())) }
+  }
+
+  pub fn add(&self, op: Ptr<QuantumOperations>) {
+    match op.deref() {
+      QuantumOperations::Reset(qbs) => {
+        for qubit in qbs {
+          self.state.reset(&qubit.index)
+        }
+      }
+      QuantumOperations::U(qb, theta, phi, lambda) => {
+        // QuantumState's rotations take (radii, target), so convert the angle
+        // first and pass the qubit index second.
+        self.state.Z(conv(lambda), &qb.index);
+        self.state.Y(conv(theta), &qb.index);
+        self.state.Z(conv(phi), &qb.index);
+      }
+      QuantumOperations::X(qb, radians) => {
+        self.state.X(conv(radians), &qb.index);
+      }
+      QuantumOperations::Y(qb, radians) => {
+        self.state.Y(conv(radians), &qb.index);
+      }
+      QuantumOperations::Z(qb, radians) => {
+        self.state.Z(conv(radians), &qb.index);
+      }
+      QuantumOperations::CX(controls, targets, radians) => {
+        // Controlled rotations are currently modelled as full flips; the passed
+        // angle is ignored for now.
+        self.state.CX(
+          180, &targets.index,
+          &controls.iter().map(|val| val.index.clone()).collect::<Vec<i64>>(), 1)
+      }
+      QuantumOperations::CZ(controls, targets, radians) => {
+        self.state.CZ(
+          180, &targets.index,
+          &controls.iter().map(|val| val.index.clone()).collect::<Vec<i64>>(), 1)
+      }
+      QuantumOperations::CY(controls, targets, radians) => {
+        self.state.CY(
+          180, &targets.index,
+          &controls.iter().map(|val| val.index.clone()).collect::<Vec<i64>>(), 1)
+      }
+      QuantumOperations::Measure(qbs) => {
+        for qb in qbs {
+          self.state.measure(&qb.index);
+        }
+      },
+      QuantumOperations::Initialize() |
+      QuantumOperations::I(_) => {},
+    }
+  }
+}
+
+/// A projected value that is either concretized and has a result, or in analysis mode and can be
+/// queried LIKE it was a result, but we haven't actually executed on the QPU yet.
+pub struct QuantumProjection {
+  trace_module: Ptr<TracingModule>,
+  engines: Ptr<EngineCollection>,
+  instructions: Vec<Ptr<QuantumOperations>>,
+  cached_result: Option<AnalysisResult>,
+  cached_filtered: HashMap<String, AnalysisResult>,
+}
+
+pub enum QuantumOperations {
+  Initialize(),
+  Reset(Vec<Qubit>),
+  I(Qubit),
+  U(Qubit, f64, f64, f64),
+  X(Qubit, f64),
+  Y(Qubit, f64),
+  Z(Qubit, f64),
+  CX(Vec<Qubit>, Qubit, f64),
+  CZ(Vec<Qubit>, Qubit, f64),
+  CY(Vec<Qubit>, Qubit, f64),
+  Measure(Vec<Qubit>)
+}
+
+impl QuantumOperations {
+  /// This only returns directly-attached qubits. So it does not return the controllers of a
+  /// controlled operation, but it does return the target.
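+  /// e.g. `CX(controls, target, theta)` yields only `[&target]`, while
+  /// `Measure(qbs)` yields a reference to every measured qubit.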
+ pub fn associated_qubits(&self) -> Vec<&Qubit> { + match self { + QuantumOperations::Initialize() => vec![], + QuantumOperations::Reset(qbs) => qbs.iter().collect(), + QuantumOperations::I(qb) | + QuantumOperations::U(qb, _, _, _) | + QuantumOperations::X(qb, _) | + QuantumOperations::Y(qb, _) | + QuantumOperations::Z(qb, _) | + QuantumOperations::CX(_, qb, _) | + QuantumOperations::CZ(_, qb, _) | + QuantumOperations::CY(_, qb, _) => vec![qb], + QuantumOperations::Measure(qbs) => qbs.iter().collect() + } + } +} + +impl Display for QuantumOperations { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(match self { + QuantumOperations::Initialize() => "init".to_string(), + QuantumOperations::Reset(qb) => format!("Reset {}", qb.iter().map(|val| val.to_string()).collect::>().join(", ")), + QuantumOperations::I(qb) => format!("id[{}]", qb), + QuantumOperations::U(qb, theta, phi, lambda) => format!("U[{}] {},{},{}", qb, theta, phi, lambda), + QuantumOperations::X(qb, theta) => format!("X[{}] {}", qb, theta), + QuantumOperations::Y(qb, theta) => format!("Y[{}] {}", qb, theta), + QuantumOperations::Z(qb, theta) => format!("Z[{}] {}", qb, theta), + QuantumOperations::CX(controlled, target, theta) => + format!("CX[{}->{}] {}", controlled.iter().map(|val| val.to_string()).collect::>().join(","), target, theta), + QuantumOperations::CZ(controlled, target, theta) => + format!("CZ[{}->{}] {}", controlled.iter().map(|val| val.to_string()).collect::>().join(","), target, theta), + QuantumOperations::CY(controlled, target, theta) => + format!("CY[{}->{}] {}", controlled.iter().map(|val| val.to_string()).collect::>().join(","), target, theta), + QuantumOperations::Measure(qb) => + format!("Measure {}", qb.iter().map(|val| val.to_string()).collect::>().join(",")) + }.as_str()) + } +} + +impl QuantumProjection { + pub fn new(engines: &Ptr) -> QuantumProjection { + QuantumProjection { + engines: engines.clone(), + instructions: Vec::new(), + trace_module: Ptr::from(TracingModule::new()), + cached_result: None, + cached_filtered: HashMap::new() + } + } + + pub fn with_tracer(engines: &Ptr, module: &Ptr) -> QuantumProjection { + QuantumProjection { + engines: engines.clone(), + instructions: Vec::new(), + trace_module: module.clone(), + cached_result: None, + cached_filtered: HashMap::new() + } + } + + /// Quick helper module as right now there's no sub-definition for projections. + fn is_tracing(&self) -> bool { + self.trace_module.has(ActiveTracers::Projections) + } + + pub fn add(&mut self, inst: &Ptr) { + // Clear any pre-computed results upon a change to the state. + if self.cached_result.is_some() { + self.cached_result = None; + self.cached_filtered.clear(); + } + self.instructions.push(inst.clone()); + } + + /// Equality across projections for specific qubit. + pub fn is_equal_for(&self, other: &Self, qbs: Option<&Vec>) -> bool { + // TODO: Needs far more nuanced equality check, as we want to check on predicted values. + + // If we're full comparison, do a quick short-circuit. 
+ if let None = qbs { + if self.instructions.len() != other.instructions.len() { + return false; + } + } + + let index_set: Option> = qbs.map(|val| HashSet::from_iter(val)); + for (ours, theirs) in zip(&self.instructions, &other.instructions) { + if let Some(qubits) = index_set.as_ref() { + let ours_match = ours.associated_qubits().iter().map(|val| val.index).any(|val| qubits.contains(&val)); + let theirs_match = theirs.associated_qubits().iter().map(|val| val.index).any(|val| qubits.contains(&val)); + + // Skip comparison of instructions which don't have anything to do with our filter, + // return false if we have one which does relate but the other does not. + if !ours_match && !theirs_match { + continue; + } else if ours_match != theirs_match { + return false; + } + } + + // TODO: These instructions shouldn't live long, so just do string compare. Inefficient but + // convenient. + if ours.to_string() != theirs.to_string() { + return false; + } + } + + return true; + } + + /// Is our projection simple enough to use algorithmic prediction? + pub fn can_predict(&self) -> bool { + false + } + + /// Perform algorithmic state value prediction. + fn predict(&mut self) -> AnalysisResult { + AnalysisResult::one() + } + + /// Get results for this entire state. + pub fn results(&mut self) -> AnalysisResult { + self.concretize().clone() + } + + /// Extracts the results for this particular set of qubits from the results of running this + /// projection. + /// + /// For example if your results are 01: 150, 00: 50 and ask for qubit 0 you'll get the + /// result 0: 200. It needs to be pointed out that this needs to be viewed as a window into the + /// overall result, not something that can be viewed by itself, because you lose all the nuance + /// around the overall state. + /// + /// It's great for asking more brute-force questions like 'is this qubit overwhelmingly 1 in the + /// results' and things of that sort though. This is also used for implicit conditional + /// evaluations. + pub fn results_for(&mut self, qb: &Vec) -> AnalysisResult { + if qb.is_empty() { + return AnalysisResult::empty() + } + + // Check if we have a cached value, if so, return. + let positions = qb.iter().map(|val| val.index as usize).collect::>(); + let cache_key = positions.iter().map(|val| val.to_string()).collect::>().join(","); + if let Some(cached) = self.cached_filtered.get(&cache_key) { + return cached.clone(); + } + + let results = self.concretize(); + + // Strip out set qubits from the results. So if you have 01010: 50 and 01011: 7 + let mut new_distribution: HashMap = HashMap::new(); + for (key, value) in results.distribution.iter() { + // -1 for zero-indexing. + let key_length = key.len() - 1; + let mut new_key= String::new(); + for index in positions.iter() { + if let Some(nth_value) = key.chars().nth(key_length - index) { + new_key.push(nth_value); + } + } + + if !new_key.is_empty() { + let existing = if let Some(existing) = new_distribution.get(new_key.as_str()) { + existing + } else { &0 }; + + new_distribution.insert(new_key.clone(), value + existing); + } + } + + let new_results = AnalysisResult::new(new_distribution); + self.cached_filtered.insert(cache_key, new_results.clone()); + if self.is_tracing() { + log!(Level::Info, "Results for [{}]: {}", qb.iter().map(|val| val.to_string()).collect::>().join(", "), new_results.to_string()); + } + + new_results + } + + /// Take the projection so far, build up a backend execution and then execute against an + /// available QPU. 
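+  /// The result is cached, so later queries (such as `results_for` over qubit
+  /// subsets) re-read the cached distribution rather than re-executing.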
+ pub fn concretize(&mut self) -> &AnalysisResult { + if self.cached_result.is_some() { + return self.cached_result.as_ref().unwrap().borrow(); + } + + let query_result = if self.can_predict() { + self.predict() + } else { + let engine = self.engines.get_available_QPU(); + + // If we're running again, clear down before execution. + engine.clear(); + for inst in self.instructions.iter() { + match inst.deref() { + QuantumOperations::Initialize() => {} + QuantumOperations::Reset(qbs) => { + for qubit in qbs { + engine.reset(qubit); + } + } + QuantumOperations::I(qb) => { engine.i(qb); }, + QuantumOperations::U(qb, theta, phi, lambda) => { + engine.u(qb, theta.clone(), phi.clone(), lambda.clone()); + } + QuantumOperations::X(qb, radians) => { + engine.x(qb, radians.clone()); + } + QuantumOperations::Y(qb, radians) => { + engine.y(qb, radians.clone()); + } + QuantumOperations::Z(qb, radians) => { + engine.z(qb, radians.clone()); + } + QuantumOperations::CX(controls, targets, radians) => { + engine.cx(controls, targets, radians.clone()); + } + QuantumOperations::CZ(controls, targets, radians) => { + engine.cz(controls, targets, radians.clone()); + } + QuantumOperations::CY(controls, targets, radians) => { + engine.cy(controls, targets, radians.clone()); + } + QuantumOperations::Measure(qbs) => { + for qb in qbs { + engine.measure(qb); + } + } + } + } + + engine.execute() + }; + + self.cached_result = Some(query_result); + + if self.is_tracing() { + log!(Level::Info, "Executed circuit:"); + for inst in self.instructions.iter() { + log!(Level::Info, "{}", inst.to_string()) + } + log!(Level::Info, "Projection results:"); + + // Order results so you can easily compare 2 side-by-side. + let mut result_values = self.cached_result.as_ref().unwrap().distribution.iter().collect::>(); + result_values.sort_by(|(left_key, _), (right_key, _)| left_key.cmp(right_key)); + for (key, value) in result_values.iter() { + log!(Level::Info, " \"{}\": {}", key.clone(), value) + } + } + + self.cached_result.as_ref().unwrap().borrow() + } +} + +impl Clone for QuantumProjection { + fn clone(&self) -> Self { + QuantumProjection { + trace_module: self.trace_module.clone(), + engines: self.engines.clone(), + instructions: self.instructions.clone(), + cached_result: self.cached_result.clone(), + cached_filtered: self.cached_filtered.clone() + } + } +} + +impl Display for QuantumProjection { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str("q-projection") + } +} + +impl PartialEq for QuantumProjection { + fn eq(&self, other: &Self) -> bool { + self.is_equal_for(other, None) + } +} + +impl PartialOrd for QuantumProjection { + fn partial_cmp(&self, other: &Self) -> Option { + // TODO + Some(Ordering::Equal) + } +} + +impl Eq for QuantumProjection { +} + +pub struct AnalysisResult { + pub distribution: HashMap +} + +impl AnalysisResult { + pub fn new(distribution: HashMap) -> AnalysisResult { + AnalysisResult { distribution } + } + + pub fn is_empty(&self) -> bool { + self.size() == 0 + } + + /// Return size of the results register in qubits. 
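+  /// e.g. a distribution keyed by "010" reports a size of 3.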
+  pub fn size(&self) -> usize {
+    self.distribution.keys().next().map_or(0, |val| val.len())
+  }
+
+  pub fn one() -> AnalysisResult {
+    AnalysisResult::new(HashMap::from(
+      [("1".to_string(), 100)]
+    ))
+  }
+
+  pub fn zero() -> AnalysisResult {
+    AnalysisResult::new(HashMap::from(
+      [("0".to_string(), 100)]
+    ))
+  }
+
+  pub fn empty() -> AnalysisResult {
+    AnalysisResult::default()
+  }
+
+  /// Compare whether this value is either 0/1 single qubit-wise, or if it's overwhelmingly
+  /// one particular value. Aka 11110 or 00001.
+  ///
+  /// This is not precisely correct as you can't say a binary sequence is the same as zero or one,
+  /// but for interpretation if someone asks you 'is this one' or 'is this zero' with no nuanced
+  /// opinions about the matter, it's one of the nicer interpretations. Limiting it to purely
+  /// single-qubit calculations so it really IS zero or one is more accurate but too limiting in
+  /// my mind.
+  fn is_value(&self, value: char) -> bool {
+    let mut value_count = 0;
+    let total_count: i64 = self.distribution.values().sum();
+    for (key, val) in self.distribution.iter() {
+      let mut count = 0;
+      for char in key.chars() {
+        if char == value {
+          count += 1;
+        }
+      }
+
+      // Count each key at most once: it contributes if the target value makes up
+      // at least half of its bits. Ceiling due to <= comparison.
+      let key_count = key.chars().count() as f64;
+      if count >= (key_count / 2.0).ceil() as i64 {
+        value_count += val;
+      }
+    }
+
+    value_count >= (total_count / 2)
+  }
+
+  pub fn is_one(&self) -> bool {
+    self.is_value('1')
+  }
+
+  pub fn is_zero(&self) -> bool {
+    self.is_value('0')
+  }
+}
+
+impl PartialEq for AnalysisResult {
+  fn eq(&self, other: &Self) -> bool {
+    // TODO: decide whether to do proper distribution analysis
+    let self_is_one = self.is_one();
+    let other_is_one = other.is_one();
+    self_is_one == other_is_one
+  }
+}
+
+impl Default for AnalysisResult {
+  fn default() -> Self {
+    AnalysisResult::new(HashMap::new())
+  }
+}
+
+impl Eq for AnalysisResult {
+}
+
+impl Clone for AnalysisResult {
+  fn clone(&self) -> Self {
+    AnalysisResult::new(self.distribution.clone())
+  }
+}
+
+impl Display for AnalysisResult {
+  fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+    f.debug_map().entries(self.distribution.iter()).finish()
+  }
+}
\ No newline at end of file
diff --git a/src/munchkin/pykin/src/builders.rs b/src/munchkin/pykin/src/builders.rs
new file mode 100644
index 0000000..68d3efb
--- /dev/null
+++ b/src/munchkin/pykin/src/builders.rs
@@ -0,0 +1,265 @@
+use std::borrow::Borrow;
+use std::f64::consts::PI;
+use std::ops::{Deref, DerefMut};
+use pyo3::{PyAny, PyResult};
+use crate::analysis::{AnalysisResult};
+use crate::hardware::{Qubit};
+use crate::smart_pointers::{Ptr};
+
+macro_rules!
python_methods { + (self.$wrapped_obj:ident.$python_gate:ident()) => { + pub fn $python_gate(&self) -> Option> { + if Ptr::is_not_null(&self.$wrapped_obj) { + let pyobj: &PyAny = self.$wrapped_obj.borrow(); + let has_gate = pyobj.hasattr(stringify!($python_gate)).unwrap_or(false); + if has_gate { + let func = pyobj.getattr(stringify!($python_gate)).unwrap(); + Some(func.call0()) + } else { None } + } else { None } + } + }; + (self.$wrapped_obj:ident.$python_gate:ident($($var:ident: $ty:ty),*)) => { + pub fn $python_gate(&self, $($var: $ty),*) -> Option> { + if Ptr::is_not_null(&self.$wrapped_obj) { + let pyobj: &PyAny = self.$wrapped_obj.borrow(); + let has_gate = pyobj.hasattr(stringify!($python_gate)).unwrap_or(false); + if has_gate { + let func = pyobj.getattr(stringify!($python_gate)).unwrap(); + Some(func.call1(($($var),*,))) + } else { None } + } else { None } + } + } +} + +struct PyBuilderAdaptor { + builder: Ptr +} + +impl PyBuilderAdaptor { + fn new(builder: &PyAny) -> PyBuilderAdaptor { + PyBuilderAdaptor { builder: Ptr::from(builder) } + } + + pub fn is_adaptor_empty(&self) -> bool { + return Ptr::is_null(self.builder.borrow()) || self.builder.is_none() + } + + python_methods!(self.builder.x(qubit: i64, radians: f64)); + python_methods!(self.builder.y(qubit: i64, radians: f64)); + python_methods!(self.builder.z(qubit: i64, radians: f64)); + python_methods!(self.builder.cx(controls: Vec, target: i64, radian: f64)); + python_methods!(self.builder.cy(controls: Vec, target: i64, radian: f64)); + python_methods!(self.builder.cz(controls: Vec, target: i64, radian: f64)); + python_methods!(self.builder.reset(qubit: i64)); + python_methods!(self.builder.measure(qubit: i64)); + python_methods!(self.builder.clear()); +} + +impl Deref for PyBuilderAdaptor { + type Target = PyAny; + + fn deref(&self) -> &Self::Target { + self.builder.deref() + } +} + +impl DerefMut for PyBuilderAdaptor { + fn deref_mut(&mut self) -> &mut Self::Target { + self.builder.deref_mut() + } +} + +impl Default for PyBuilderAdaptor { + fn default() -> Self { + PyBuilderAdaptor { builder: Ptr::default() } + } +} + +struct PyRuntimeAdaptor { + runtime: Ptr +} + +impl PyRuntimeAdaptor { + fn new(runtime: &PyAny) -> PyRuntimeAdaptor { + PyRuntimeAdaptor { runtime: Ptr::from(runtime) } + } + + pub fn is_adaptor_empty(&self) -> bool { + return Ptr::is_null(self.runtime.borrow()) || self.runtime.is_none() + } + + python_methods!(self.runtime.execute(builder: &PyAny)); +} + +impl Deref for PyRuntimeAdaptor { + type Target = PyAny; + + fn deref(&self) -> &Self::Target { + self.runtime.deref() + } +} + +impl DerefMut for PyRuntimeAdaptor { + fn deref_mut(&mut self) -> &mut Self::Target { + self.runtime.deref_mut() + } +} + +impl Default for PyRuntimeAdaptor { + fn default() -> Self { + PyRuntimeAdaptor { runtime: Ptr::default() } + } +} + +pub struct PythonEngine { + builder: PyBuilderAdaptor, + runtime: PyRuntimeAdaptor +} + +impl PythonEngine { + pub fn new(builder: &PyAny, backend: &PyAny) -> PythonEngine { + PythonEngine { builder: PyBuilderAdaptor::new(builder), runtime: PyRuntimeAdaptor::new(backend) } + } + + pub fn execute(&self) -> AnalysisResult { + if self.runtime.is_adaptor_empty() || self.builder.is_adaptor_empty() { + return AnalysisResult::empty(); + } + + let result = self.runtime.execute(self.builder.deref()) + .expect("Engine doesn't have an execute method.").expect("QPU didn't return a result."); + + AnalysisResult::new( + result.extract().expect("Object returned from 'execute' isn't a distribution 
dictionary.")) + } +} + +impl Default for PythonEngine { + fn default() -> Self { + PythonEngine { builder: PyBuilderAdaptor::default(), runtime: PyRuntimeAdaptor::default() } + } +} + +impl Builder for PythonEngine { + fn clear(&self) -> &Self { + self.builder.clear(); + self + } +} + +// TODO: Make sure we propagate Python exceptions for easy debugging. +impl InstructionBuilder for PythonEngine { + fn measure(&self, qb: &Qubit) -> &Self { + self.builder.measure(qb.index); + self + } + + fn x(&self, qb: &Qubit, radians: f64) -> &Self { + self.builder.x(qb.index, radians); + self + } + + fn y(&self, qb: &Qubit, radians: f64) -> &Self { + self.builder.y(qb.index, radians); + self + } + + fn z(&self, qb: &Qubit, radians: f64) -> &Self { + self.builder.z(qb.index, radians); + self + } + + fn cx(&self, controls: &Vec, target: &Qubit, radians: f64) -> &Self { + let controls: Vec = controls.iter().map(|val| val.index).collect::>(); + self.builder.cx(controls, target.index, radians); + self + } + + fn cy(&self, controls: &Vec, target: &Qubit, radians: f64) -> &Self { + let controls = controls.iter().map(|val| val.index).collect::>(); + self.builder.cy(controls, target.index, radians); + self + } + + fn cz(&self, controls: &Vec, target: &Qubit, radians: f64) -> &Self { + let controls: Vec = controls.iter().map(|val| val.index).collect::>(); + self.builder.cz(controls, target.index, radians); + self + } + + fn reset(&self, qb: &Qubit) -> &Self { + self.builder.reset(qb.index); + self + } +} + +pub trait Builder { + fn clear(&self) -> &Self { self } +} + +pub trait InstructionBuilder: Builder { + fn measure(&self, qb: &Qubit) -> &Self { self } + + fn had(&self, qb: &Qubit) -> &Self { + self.z(qb, PI); + self.y(qb, PI / 2.0) + } + + fn i(&self, qb: &Qubit) -> &Self { + self + } + + fn x(&self, qb: &Qubit, radii: f64) -> &Self { self } + + fn y(&self, qb: &Qubit, radii: f64) -> &Self { self } + + fn z(&self, qb: &Qubit, radii: f64) -> &Self { self } + + fn u(&self, qb: &Qubit, theta: f64, phi: f64, lambda: f64) -> &Self { + self.z(qb, lambda).y(qb, phi).z(qb, theta) + } + + fn swap(&self, first: &Qubit, second: &Qubit) -> &Self { self } + + fn sx(&self, qb: &Qubit) -> &Self { + self.x(qb, PI / 2.0) + } + + fn sx_dgr(&self, qb: &Qubit) -> &Self { + self.x(qb, -(PI / 2.0)) + } + + fn s(&self, qb: &Qubit) -> &Self { + self.z(qb, PI / 2.0) + } + + fn s_dgr(&self, qb: &Qubit) -> &Self { + self.z(qb, -(PI / 2.0)) + } + + fn t(&self, qb: &Qubit) -> &Self { + self.z(qb, PI / 4.0) + } + + fn t_dgr(&self, qb: &Qubit) -> &Self { + self.z(qb, -(PI / 4.0)) + } + + fn cx(&self, controls: &Vec, target: &Qubit, radii: f64) -> &Self { self } + + fn cy(&self, controls: &Vec, target: &Qubit, radii: f64) -> &Self { self } + + fn cz(&self, controls: &Vec, target: &Qubit, radii: f64) -> &Self { self } + + fn cnot(&self, control: &Qubit, target: &Qubit, radii: f64) -> &Self { + self.cx(&vec!(control.clone()), target, radii) + } + + fn ccnot(&self, c1: &Qubit, c2: &Qubit, target: &Qubit, radii: f64) -> &Self { + self.cx(&vec!(c1.clone(), c2.clone()), target, radii) + } + + fn reset(&self, qb: &Qubit) -> &Self { self } +} \ No newline at end of file diff --git a/src/munchkin/pykin/src/evaluator.rs b/src/munchkin/pykin/src/evaluator.rs new file mode 100644 index 0000000..37fd015 --- /dev/null +++ b/src/munchkin/pykin/src/evaluator.rs @@ -0,0 +1,1631 @@ +use inkwell::module::Module; +use std::collections::{HashMap}; +use inkwell::values::{AggregateValue, AnyValue, AnyValueEnum, AsValueRef, BasicValue, FunctionValue, 
InstructionOpcode, InstructionValue};
+use inkwell::types::{AnyTypeEnum};
+use std::f64::consts::PI;
+use log::warn;
+use regex::Regex;
+use inkwell::{FloatPredicate, IntPredicate};
+use std::borrow::{Borrow, BorrowMut};
+use std::ffi::{c_uint, CStr};
+use std::ops::{Deref};
+use inkwell::basic_block::BasicBlock;
+use llvm_sys::core::{LLVMConstIntGetSExtValue, LLVMGetElementType, LLVMGetNumOperands, LLVMGetOperand, LLVMGetTypeKind, LLVMPrintTypeToString, LLVMPrintValueToString, LLVMTypeOf};
+use llvm_sys::LLVMTypeKind;
+use crate::with_mutable;
+use crate::graphs::{Node, AnalysisGraph, AnalysisGraphBuilder, ExecutableAnalysisGraph, CallableAnalysisGraph};
+use crate::hardware::Qubit;
+use crate::instructions::{Condition, Equalities, Expression, Instruction, LambdaModifier, Operator, Pauli, Value};
+use crate::runtime::RuntimeContext;
+use crate::smart_pointers::{Ptr};
+
+macro_rules! operand_to_value {
+  ($target:ident, $index:expr) => ($target.get_operand($index).expect("Can't resolve operand.").left().expect("Operand isn't a value.").as_any_value_enum().borrow_mut());
+}
+
+macro_rules! operand_to_instruction {
+  ($target:ident, $index:expr) => ($target.get_operand($index).expect("Can't resolve operand.").left().expect("Operand isn't a value."));
+}
+
+macro_rules! operand_to_bb {
+  ($target:ident, $index:expr) => ($target.get_operand($index).expect("Can't resolve operand.").right().expect("Operand isn't a basic block."));
+}
+
+// TODO: Since Inkwell doesn't expose things properly, try and use the llvm-sys objects to find the
+// data. We want to remove all the string fetching/matching as it's inefficient.
+
+/// Fetches the &{value} from a stringified LLVM instruction to give a loose name to the values we
+/// reference.
+pub fn get_ref_id_from_instruction(inst: &InstructionValue) -> String {
+  parse_ref_id_from_instruction(inst).expect("Can't find ref-id from instruction")
+}
+
+pub fn parse_ref_id_from_instruction(inst: &InstructionValue) -> Option<String> {
+  let inst_str = inst.to_string().trim_end_matches("\"").trim_start_matches("\"").trim().to_string();
+  parse_ref_id_from_instruction_str(&inst_str)
+}
+
+pub fn parse_ref_id_from_instruction_str(inst_str: &String) -> Option<String> {
+  let llvm_var_finder = Regex::new("([%@][^ ]*) =").unwrap();
+  llvm_var_finder.captures(inst_str.as_str())
+    .map_or_else(
+      || parse_ref_id_from_value(inst_str.clone()).or(None),
+      |capture_groups| Some(capture_groups.get(1).unwrap().as_str().to_string()))
+}
+
+/// Gets the variable name, %var_name, from a type/value string.
+pub fn get_ref_id_from_value(ptr_string: String) -> String {
+  parse_ref_id_from_value(ptr_string).expect("Can't parse ref-id from value.")
+}
+
+/// Gets the variable name, %var_name, from a type/value string.
+/// TODO: Need a proper way to get the variables from a general state. While this works it's not
+/// entirely bulletproof and needs tweaking as issues come up, and the issues it causes are not
+/// immediately obvious.
+pub fn parse_ref_id_from_value(ptr_string: String) -> Option<String> {
+  let ptr_string = ptr_string.trim_matches('"').trim();
+  let pointer_variable_finder = Regex::new("^.*\\s(%[\\w0-9\\-]+)$").unwrap();
+  let capture_groups = pointer_variable_finder.captures(ptr_string);
+  let mut ref_id = capture_groups.map_or(None, |val| {
+    Some(val.get(1).unwrap().as_str().to_string())
+  });
+
+  // If we can't find a local variable, look globally.
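+  // As an illustrative sketch (inputs are hypothetical, not captured output):
+  // a local like "i64* %target-1" is caught by the matcher above, while a
+  // leading global such as "@results" is only caught by the fall-backs below.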
+  ref_id = ref_id.or_else(|| {
+    let pointer_variable_finder = Regex::new("^.*\\s(@[\\w0-9\\-]+)$").unwrap();
+    let capture_groups = pointer_variable_finder.captures(ptr_string);
+    capture_groups.map_or(None, |val| {
+      let val = val.get(1).unwrap().as_str().to_string();
+      if val.trim().is_empty() { None } else { Some(val) }
+    })
+  });
+
+  // Finally, check if we're a global instruction target.
+  ref_id.or_else(|| {
+    let pointer_variable_finder = Regex::new("^@[^\\s*]+(\\s|$)").unwrap();
+    let capture_groups = pointer_variable_finder.captures(ptr_string);
+    capture_groups.map_or(None, |value| {
+      let mut value = value.get(0).unwrap().as_str();
+      value = value.trim();
+      if value.is_empty() { None } else { Some(value.to_string()) }
+    })
+  })
+}
+
+pub struct EvaluationContext<'ctx> {
+  pub module: Ptr<Module<'ctx>>,
+  pub global_variables: Ptr<HashMap<String, Ptr<Value>>>,
+
+  // Basic-block anchor nodes, essentially start/end ones.
+  pub anchors: HashMap<String, Ptr<Node>>,
+  pub method_graphs: Ptr<HashMap<String, Ptr<AnalysisGraph>>>,
+
+  /// Hack for QIR implementations that don't implement variables at all and assume
+  /// measures/returns happen by magic means. If not empty, all returns will instead return
+  /// the values in the list.
+  pub is_base_profile: Ptr<bool>,
+  pub throwaway_variables: Ptr<i64>
+}
+
+impl<'a> EvaluationContext<'a> {
+  pub fn new(module: &Ptr<Module<'a>>) -> EvaluationContext<'a> {
+    EvaluationContext {
+      module: module.clone(),
+      global_variables: Ptr::from(HashMap::new()),
+      anchors: HashMap::new(),
+      method_graphs: Ptr::from(HashMap::new()),
+      is_base_profile: Ptr::from(false),
+      throwaway_variables: Ptr::from(0)
+    }
+  }
+
+  pub fn create_subcontext(parent: &Ptr<EvaluationContext<'a>>) -> EvaluationContext<'a> {
+    EvaluationContext {
+      module: parent.module.clone(),
+      global_variables: parent.global_variables.clone(),
+      anchors: HashMap::new(),
+      method_graphs: parent.method_graphs.clone(),
+      is_base_profile: parent.is_base_profile.clone(),
+      throwaway_variables: parent.throwaway_variables.clone()
+    }
+  }
+
+  /// Gets the next throwaway variable for assignment.
+  pub fn next_throwaway(&self) -> String {
+    let var = format!("_eph_{}", self.throwaway_variables);
+    unsafe {
+      self.throwaway_variables.as_ptr().replace(*self.throwaway_variables + 1);
+    }
+    var
+  }
+}
+
+pub struct QIREvaluator {}
+
+impl QIREvaluator {
+  pub fn new() -> QIREvaluator {
+    QIREvaluator {}
+  }
+
+  /// Evaluates a module and entry-point for execution. Returns the execution graph to then be
+  /// run in an interpreter.
+  pub fn evaluate(&self, entry_point: &FunctionValue, module: &Ptr<Module>) -> Result<Ptr<ExecutableAnalysisGraph>, String> {
+    let mut context = Ptr::from(EvaluationContext::new(module));
+    let mut target_global = module.get_first_global();
+    while target_global.is_some() {
+      let global = target_global.unwrap();
+      let maybe_initializer = global.get_initializer();
+      if let Some(init) = maybe_initializer {
+        // TODO: Remove graph requirement from as_value.
+        if let Some(value) = self.as_value(&init.as_any_value_enum(), &Ptr::default(), context.borrow()) {
+          // Some globals seem invalid here; in that case don't add them.
+          if let Some(ref_id) = parse_ref_id_from_instruction_str(&global.to_string()) {
+            context.global_variables.insert(ref_id, Ptr::from(value));
+          }
+        }
+      }
+
+      target_global = global.get_next_global();
+    }
+
+    let builder = self.walk_function(entry_point, context.borrow());
+
+    // Create a callable graph with its arguments, but the values set as empty (validly).
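+    // As a hedged example: for a hypothetical entry point `@main(i64 %shots)`,
+    // the loop below would map "%shots" -> Value::Empty until a caller injects
+    // a concrete argument.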
+    let mut callable = Ptr::from(CallableAnalysisGraph::new(&builder.graph));
+    for param in entry_point.get_params().iter() {
+      let param_ref_id = get_ref_id_from_value(param.to_string());
+      callable.argument_mappings.insert(param_ref_id, Ptr::from(Value::Empty));
+    }
+
+    let exe_graph = ExecutableAnalysisGraph::with_context(
+      &callable, &Ptr::from(RuntimeContext::from_evaluation(&context)));
+
+    Ok(Ptr::from(exe_graph))
+  }
+
+  /// For-now method to retrieve the name of a Call target. I'm sure it's in the
+  /// instruction somewhere, but it's not obvious how to retrieve it via this API.
+  fn get_method_name(&self, inst: &InstructionValue) -> Option<String> {
+    if inst.get_opcode() != InstructionOpcode::Call {
+      return None;
+    }
+
+    let mut operation_name = inst.print_to_string().to_string();
+    let start = operation_name.find("@")?;
+    let end = operation_name.find("(")?;
+    let mut call_name = operation_name.split_off(start+1);
+    call_name.truncate(end-start-1);
+    Some(String::from(call_name))
+  }
+
+  /// Note: on the exit of a basic block there should be no auto-attach point. Each BB is isolated
+  /// and the various jumps are dealt with by attaching to the anchors and distinct operations.
+  fn walk_basic_block(&self, bb: &BasicBlock, graph: &Ptr<AnalysisGraphBuilder>,
+                      context: &Ptr<EvaluationContext>) {
+
+    // Attach our starting anchor as the default attach point to get started.
+    let bb_name = bb.get_name().to_str().unwrap();
+    let starting_point = with_mutable!(context.anchors.get_mut(bb_name).expect("Anchor needs to exist."));
+    with_mutable!(graph.reattach(starting_point));
+
+    let mut next_inst = bb.get_first_instruction();
+    while next_inst.is_some() {
+      let inst = Ptr::from(next_inst.unwrap());
+      self.walk_instruction(inst.borrow(), graph, context);
+      next_inst = inst.get_next_instruction();
+    }
+
+    with_mutable!(graph.unattach());
+  }
+
+  fn walk_function(&self, func: &FunctionValue,
+                   context: &Ptr<EvaluationContext>) -> Ptr<AnalysisGraphBuilder> {
+    let method_name = func.get_name().to_str().unwrap().to_string();
+    if let Some(existing) = context.method_graphs.get(method_name.as_str()) {
+      return Ptr::from(AnalysisGraphBuilder::new(existing));
+    }
+
+    let mut subcontext = Ptr::from(EvaluationContext::create_subcontext(context));
+
+    let graph = Ptr::from(AnalysisGraph::new(method_name.clone()));
+    with_mutable!(context.method_graphs.insert(method_name, graph.clone()));
+
+    // Build up anchor labels/nodes so we can associate them at the start and end.
+    for bb in func.get_basic_blocks() {
+      let bb_name = bb.get_name().to_str().unwrap().to_string();
+      let anchor_node = with_mutable!(graph.add_loose(Instruction::Label(bb_name.clone())));
+      subcontext.anchors.insert(bb_name, anchor_node.clone());
+    }
+
+    let builder = Ptr::from(AnalysisGraphBuilder::new(graph.borrow()));
+    for bb in func.get_basic_blocks().iter() {
+      self.walk_basic_block(bb, &builder, subcontext.borrow());
+    }
+
+    builder
+  }
+
+  /// Hacked-together method to centralize GEP extraction. Done using llvm-sys objects because
+  /// Inkwell doesn't have any way to access operands when a GEP is an argument, and it's
+  /// unknown when it will.
+  fn extract_gep(&self, any_val: &AnyValueEnum, graph: &Ptr<AnalysisGraphBuilder>,
+                 context: &Ptr<EvaluationContext>) -> Option<Value> {
+    unsafe {
+      let expr = any_val.as_value_ref();
+      let first_op = LLVMGetOperand(expr, 0);
+
+      // For now assume getelementptr only works on tuples/vectors/structs and we have an
+      // element kind. May not always hold.
+ let type_ref = LLVMTypeOf(first_op); + let type_string = CStr::from_ptr(LLVMPrintTypeToString(type_ref)).to_str().unwrap().to_string(); + let ele_type = LLVMGetElementType(type_ref); + let ele_kind = LLVMGetTypeKind(ele_type); + + let llvm_string = CStr::from_ptr(LLVMPrintValueToString(first_op)); + let ref_id = parse_ref_id_from_instruction_str(&llvm_string.to_string_lossy().to_string()).expect("Need ref-id from instruction"); + let mut prev_throwaway = context.next_throwaway(); + graph.Assign(prev_throwaway.clone(), Value::Ref(ref_id.clone(), None)); + + let op_num = LLVMGetNumOperands(expr); + + // The first operand is the type/pointer, second is walking through the pointer so is + // always 0, we ignore this. + let mut starting_operand = 2; + + // If we're an array we skip another indexer as GEP sets the address at the beginning + // of an array for iteration. + // TODO: Likely not accurate for every type, especially jagged arrays. + if ele_kind == LLVMTypeKind::LLVMArrayTypeKind { + starting_operand += 1; + } + + while starting_operand < op_num { + let next_throwaway = context.next_throwaway(); + let op_value = unsafe { LLVMGetOperand(expr, starting_operand as c_uint) }; + let actual_index: i64 = LLVMConstIntGetSExtValue(op_value); + graph.Assign(next_throwaway.clone(), Value::Ref(prev_throwaway.clone(), Some(Ptr::from(Value::Int(actual_index))))); + prev_throwaway = next_throwaway; + starting_operand += 1; + } + + Some(Value::Ref(prev_throwaway.clone(), None)) + } + } + + /// Private recursive method for as_value, extracted out for easier access to state. You will + /// almost never want to use this directly. Use as_value instead. + fn _as_value_recursive(&self, graph: &Ptr, type_enum: &AnyTypeEnum, + val_enum: &AnyValueEnum, context: &Ptr) -> Option { + let ref_id = parse_ref_id_from_value(val_enum.to_string()); + if ref_id.is_some() { + return Some(Value::Ref(ref_id.unwrap(), None)) + } + + match type_enum { + AnyTypeEnum::ArrayType(t) => { + // Arrays are either strings or constant arrays of values, so redirect based + // on which one it is. + let vec = val_enum.into_array_value(); + if vec.is_const_string() { + Some(Value::String(vec.get_string_constant().unwrap().to_str().unwrap().to_string())) + } else if vec.is_const() { + let mut result = Vec::new(); + for int in 0..(t.len()) { + result.push( + self.as_value_ptr( + &vec.const_extract_value(&mut [int]).as_any_value_enum(), graph, context).expect("Can't resolve array element.")); + } + + Some(Value::Array(result)) + } else { + Some(Value::Empty) + } + }, + AnyTypeEnum::FloatType(t) => { + let llvm_context = t.get_context(); + + // Second value is about losiness of floats, which right now we discard. + let numeric = val_enum.into_float_value().get_constant().expect("Float parsing has failed.").0; + + // TODO: Find a way to make f128 work, if it needs it. + return if t == llvm_context.f16_type().borrow_mut() { + Some(Value::Float(numeric)) + } else if t == llvm_context.f32_type().borrow_mut() { + Some(Value::Float(numeric)) + } else if t == llvm_context.f64_type().borrow_mut() { + Some(Value::Float(numeric)) + } else if t == llvm_context.f128_type().borrow_mut() { + Some(Value::Float(numeric)) + } else { + Some(Value::Float(numeric)) + } + }, + AnyTypeEnum::IntType(t) => { + let llvm_context = t.get_context(); + let numeric = val_enum.into_int_value().get_sign_extended_constant().expect("Int parsing has failed."); + + // TODO: Doesn't _really_ deal with longs. 
+ return if t == llvm_context.bool_type().borrow_mut() { + // Bools come in as -1 = true, 0 = false, no clue why. + // Think it's some C thing. + Some(Value::Bool(numeric == -1)) + } else if t == llvm_context.i8_type().borrow_mut() { + Some(Value::Short(numeric as i16)) + } else if t == llvm_context.i16_type().borrow_mut() { + Some(Value::Short(numeric as i16)) + } else if t == llvm_context.i32_type().borrow_mut() { + Some(Value::Int(numeric)) + } else if t == llvm_context.i64_type().borrow_mut() { + Some(Value::Int(numeric)) + } else if t == llvm_context.i128_type().borrow_mut() { + Some(Value::Long(numeric as i128)) + } else { + Some(Value::Int(numeric)) + } + }, + AnyTypeEnum::PointerType(t) => { + // TODO: GEP analysis and fixing should be cleaned up as soon as Inkwell + // supports it. + let full_value_str = val_enum.to_string(); + if full_value_str.contains("getelementptr") { + self.extract_gep(val_enum, graph, context) + } else { + let pval = val_enum.into_pointer_value(); + let is_struct = t.get_element_type().is_struct_type(); + + // Structs, especially opaque ones, have their own rules and even if they're + // null it may mean something very different. + if (pval.is_null() || pval.is_undef()) && !is_struct { + return Some(Value::Empty); + } + + // At this point all actual pointers should have been dealt with, so + // we simply need to roll through the pointer to its actual type. + self._as_value_recursive(graph, t.get_element_type().borrow(), val_enum, context) + } + }, + AnyTypeEnum::StructType(t) => { + // Opaques need us to give them a type... + if t.is_opaque() { + let struct_name = t.get_name().unwrap_or_default().to_str().unwrap_or_default(); + let ptr_val = val_enum.into_pointer_value(); + let index = if ptr_val.is_null() || ptr_val.is_undef() { + 0 + } else { + ptr_val.const_to_int(context.module.get_context().i64_type()) + .get_sign_extended_constant().unwrap_or_default() as i64 + }; + + // TODO: Make custom results object, probably re-use projection results. + match struct_name { + "Qubit" => Some(Value::Qubit(Qubit::new(index))), + "Result" => Some(Value::Int(index)), + val => { + // Qubit and results are special in that their nulls = 0, everything + // else null is implied as empty. + if ptr_val.is_null() { + return Some(Value::Empty); + } + + unimplemented!() + } + } + + // Where-as pure composites already have a structure, it's just freeform. + // For now just consider them arrays (as access is the same). + + // TODO: Decide whether arrays/composites need to be distinguished. + } else { + let struct_val = val_enum.into_struct_value(); + let mut result = Vec::new(); + for int in 0..(t.count_fields()) { + result.push( + self.as_value_ptr( + &struct_val.const_extract_value(&mut [int]).as_any_value_enum(), graph, context).expect("Can't resolve struct element.")); + } + + Some(Value::Array(result)) + } + }, + AnyTypeEnum::VectorType(_) => { unimplemented!() }, + AnyTypeEnum::VoidType(_) => { unimplemented!() }, + AnyTypeEnum::FunctionType(_) => { unimplemented!() }, + } + } + + /// See [as_value] but returns value wrapped in a flexi-pointer. + fn as_value_ptr(&self, any_val: &AnyValueEnum, + graph: &Ptr, + context: &Ptr) -> Option> { + let result = self.as_value(any_val, graph, context); + result.map(|val| Ptr::from(val)) + } + + /// Evaluates the instruction and returns its results as a value. 
+ fn as_value(&self, any_val: &AnyValueEnum, + graph: &Ptr, + context: &Ptr) -> Option { + let function_val = match any_val { + AnyValueEnum::FunctionValue(fv) => Some(Value::Ref(fv.get_name().to_str().unwrap().to_string(), None)), + _ => None + }; + + // If we're a function value don't continue, as their to_string is the entire function. + if function_val.is_some() { + return function_val + } + + let instruction_value = match any_val { + AnyValueEnum::ArrayValue(av) => av.as_instruction(), + AnyValueEnum::FloatValue(av) => av.as_instruction(), + AnyValueEnum::IntValue(av) => av.as_instruction(), + AnyValueEnum::PointerValue(av) => av.as_instruction(), + AnyValueEnum::StructValue(av) => av.as_instruction(), + AnyValueEnum::VectorValue(av) => av.as_instruction(), + _ => None + }; + + // If we're an instruction get the value assignment for the result of it instead. + if instruction_value.is_some() { + let ref_id = get_ref_id_from_instruction(instruction_value.unwrap().borrow()); + return Some(Value::Ref(ref_id, None)); + } + + // Recursive method for looping through pointers to get to values. + self._as_value_recursive(graph, any_val.get_type().borrow(), any_val, context) + } + + /// Evaluates the instructions and adds them to the graph as we go. + fn walk_instruction(&self, inst: &Ptr, + graph: &Ptr, + context: &Ptr) { + let op_code = inst.get_opcode(); + match op_code { + InstructionOpcode::Call => { + self.eval_call(inst, graph, context); + }, + InstructionOpcode::Return => { + self.eval_ret(inst, graph, context); + }, + InstructionOpcode::Br => { + self.eval_branch(inst, graph, context); + }, + InstructionOpcode::Switch | + InstructionOpcode::IndirectBr | + InstructionOpcode::Invoke | + InstructionOpcode::FNeg => { + self.eval_fneg(inst, graph, context); + }, + InstructionOpcode::Add => { + self.eval_add(inst, graph, context); + }, + InstructionOpcode::FAdd => { + self.eval_add(inst, graph, context); + }, + InstructionOpcode::Sub => { + self.eval_sub(inst, graph, context); + }, + InstructionOpcode::FSub => { + self.eval_sub(inst, graph, context); + }, + InstructionOpcode::Mul => { + self.eval_mul(inst, graph, context); + }, + InstructionOpcode::FMul => { + self.eval_mul(inst, graph, context); + }, + InstructionOpcode::UDiv => { + self.eval_div(inst, graph, context); + }, + InstructionOpcode::SDiv => { + self.eval_div(inst, graph, context); + }, + InstructionOpcode::FDiv => { + self.eval_div(inst, graph, context); + }, + InstructionOpcode::URem | + InstructionOpcode::SRem | + InstructionOpcode::FRem | + InstructionOpcode::Shl | + InstructionOpcode::LShr | + InstructionOpcode::AShr => { + todo!("{}", inst.print_to_string().to_string()) + }, + InstructionOpcode::And => { + self.eval_or(inst, graph, context); + }, + InstructionOpcode::Or => { + self.eval_or(inst, graph, context); + }, + InstructionOpcode::Xor => { + self.eval_xor(inst, graph, context); + }, + InstructionOpcode::ExtractElement | + InstructionOpcode::InsertElement | + InstructionOpcode::ShuffleVector => { + todo!("{}", inst.print_to_string().to_string()) + }, + InstructionOpcode::ExtractValue => { + self.eval_extractvalue(inst, graph, context); + }, + InstructionOpcode::InsertValue => { + self.eval_insertvalue(inst, graph, context); + }, + InstructionOpcode::Load => { + self.eval_load(inst, graph, context); + }, + InstructionOpcode::Store => { + self.eval_store(inst, graph, context); + }, + InstructionOpcode::Fence | + InstructionOpcode::AtomicCmpXchg | + InstructionOpcode::AtomicRMW => { + todo!("{}", 
inst.print_to_string().to_string()) + }, + InstructionOpcode::GetElementPtr => { + self.eval_getelementptr(inst, graph, context); + }, + InstructionOpcode::Trunc => { + self.eval_trunc(inst, graph, context); + }, + InstructionOpcode::FPTrunc => { + self.eval_trunc(inst, graph, context); + }, + InstructionOpcode::ZExt | + InstructionOpcode::FPExt | + InstructionOpcode::SExt => { + todo!("{}", inst.print_to_string().to_string()) + }, + InstructionOpcode::FPToUI => { + self.eval_numeric_cast(inst, graph, context); + }, + InstructionOpcode::UIToFP => { + self.eval_numeric_cast(inst, graph, context); + }, + InstructionOpcode::FPToSI => { + self.eval_numeric_cast(inst, graph, context); + }, + InstructionOpcode::SIToFP => { + self.eval_numeric_cast(inst, graph, context); + }, + InstructionOpcode::PtrToInt => { + todo!("{}", inst.print_to_string().to_string()) + }, + InstructionOpcode::IntToPtr => { + self.eval_int_to_ptr(inst, graph, context); + }, + InstructionOpcode::BitCast => { + self.eval_bitcast(inst, graph, context); + }, + InstructionOpcode::AddrSpaceCast => { + todo!("{}", inst.print_to_string().to_string()) + }, + InstructionOpcode::ICmp => { + self.eval_icmp(inst, graph, context); + }, + InstructionOpcode::FCmp => { + self.eval_icmp(inst, graph, context); + }, + InstructionOpcode::Phi => { + // All a phi's logic is taken care of by the associated branches, so the phi itself + // doesn't need to be processed. + }, + InstructionOpcode::Select => { + self.eval_select(inst, graph, context); + }, + InstructionOpcode::Alloca | + InstructionOpcode::Resume | + InstructionOpcode::Freeze | + InstructionOpcode::VAArg | + InstructionOpcode::LandingPad | + InstructionOpcode::CatchPad | + InstructionOpcode::CleanupPad | + InstructionOpcode::Unreachable => { + // Instructions we likely won't need for quite some time, if ever. + }, + _ => panic!("Unknown instruction type: {}! Can't verify program will execute correctly.", inst.print_to_string().to_string()) + } + } + + fn eval_intrinsic(&self, name: String, inst: &Ptr, graph: &Ptr, context: &Ptr) -> Option { + let parse_as_value= |inst: &Ptr, index: u32| -> Option { + let op = inst.get_operand(index).expect(&*format!("Operand at {} doesn't exist", 0)); + let qb_value = op.left().expect("Operand isn't a value."); + self.as_value(qb_value.as_any_value_enum().borrow(), graph, context) + }; + + let parse_qubit = |inst: &Ptr, index: u32| -> Value { + parse_as_value(inst, index).expect("Can't find a qubit variable.") + }; + + // Parse the lambda array, evaluate all potential methods and return the first one to + // use as an anchor. + let parse_default_callable = |global_name: &String| -> Option> { + context.global_variables.get(global_name).map_or(None, |callable_array| { + let mut first = None; + for val in callable_array.as_array() { + if let Some((method_name, _)) = val.try_as_reference() { + if let Some(llvm_method) = context.module.get_function(method_name.as_str()) { + let first_eval = !context.method_graphs.contains_key(method_name.as_str()); + let mut builder = self.walk_function(&llvm_method, context); + if first_eval { + for exit in builder.exit_points() { + let ret_node = Ptr::from(Instruction::Return(Ptr::from(Value::Ref("%result-tuple".to_string(), None)))); + builder.add_with_edge(ret_node.borrow(), exit.borrow(), None, None); + } + } + + if first.is_none() { + first = Some(builder.graph.clone()); + } + } + } + } + + first + }) + }; + + // X is mapped as 1 instead of -1 in the test files we have. Fix-up for now. 
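+    // i.e. an incoming Value::Int(1) is first rewritten to -1, and any integer
+    // is then converted through Pauli::from_num; non-integer values pass
+    // through untouched.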
+ let fix_pauli = |mut pauli: Value| -> Value { + if let Value::Int(mut i) = pauli { + if i == 1 { + i = -1 + } + + pauli = Value::Pauli(Pauli::from_num(&(i as i8))); + } + + return pauli + }; + + // Expands the qis__ctrl argument tuples out. + let expand_arg_tuple = |tuple_index: u32|-> (String, String, String) { + // The arguments are in a tuple: controllers, (pauli, target, rotation). + // Extract and expand. + let target_tuple = parse_as_value(inst, tuple_index).expect("Need tuple to flatten."); + let tuple_var = context.next_throwaway(); + graph.Assign(tuple_var.clone(), target_tuple); + let pauli = context.next_throwaway(); + graph.Assign(pauli.clone(), Value::Ref(tuple_var.clone(), Some(Ptr::from(Value::Int(0))))); + let rotation = context.next_throwaway(); + graph.Assign(rotation.clone(), Value::Ref(tuple_var.clone(), Some(Ptr::from(Value::Int(1))))); + let target = context.next_throwaway(); + graph.Assign(target.clone(), Value::Ref(tuple_var, Some(Ptr::from(Value::Int(2))))); + (pauli, target, rotation) + }; + + match name.as_str() { + // Rotations + "__quantum__qis__r__body" => { + let mut pauli = parse_as_value(inst, 0).expect("Can't find a pauli."); + pauli = fix_pauli(pauli); + + let rotation = parse_as_value(inst, 1).expect("Can't find a rotation."); + let qubit = parse_as_value(inst, 2).expect("Can't find a qubit."); + graph.R(pauli, qubit, rotation); + }, + "__quantum__qis__r__ctl" => { + let control = parse_qubit(inst, 0); + let (pauli, target, rotation) = expand_arg_tuple(1); + graph.CR( + Value::Ref(pauli, None), + control, + Value::Ref(target, None), + Value::Ref(rotation, None)); + }, + "__quantum__qis__r__adj" => { + let controls = parse_as_value(inst, 0).expect("Can't find controls."); + let qubit = parse_as_value(inst, 1).expect("Can't find a qubit."); + let rotation = parse_as_value(inst, 2).expect("Can't find a rotation."); + graph.CR( + Value::Pauli(Pauli::Z), + controls, + qubit, + rotation + ); + }, + "__quantum__qis__r__ctladj" => { + let controls = parse_as_value(inst, 0).expect("Can't find controls."); + let (pauli, target, rotation) = expand_arg_tuple(1); + let throwaway = context.next_throwaway(); + + graph.Expression(Expression::NegateSign(Value::Ref(rotation, None)), Some(throwaway.clone())); + graph.CR( + Value::Pauli(Pauli::Z), + controls, + Value::Ref(target, None), + Value::Ref(throwaway, None) + ); + }, + "__quantum__qis__h__body" => { + let qubit = parse_qubit(inst, 0); + graph.Z(qubit.clone(), PI); + graph.Y(qubit, PI/2.0); + }, + "__quantum__qis__h__ctl" => { + let controllers = parse_as_value(inst, 0).expect("Couldn't resolve control qubits."); + let target = parse_qubit(inst, 1); + graph.CZ(controllers.clone(), target.clone(), PI); + graph.CY(controllers, target, PI/2.0); + }, + "__quantum__qis__s__body" => { + let qb = parse_qubit(inst, 0); + graph.Z(qb, PI/2.0); + }, + "__quantum__qis__s__adj" => { + let qb = parse_qubit(inst, 0); + graph.Z(qb, -(PI/2.0)); + }, + "__quantum__qis__s__ctl" => { + let controllers = parse_as_value(inst, 0).expect("Need control qubits."); + let qb = parse_qubit(inst, 1); + graph.CZ(controllers, qb, PI/2.0); + }, + "__quantum__qis__s__ctladj" => { + let controllers = parse_as_value(inst, 0).expect("Need control qubits."); + let qb = parse_qubit(inst, 1); + graph.CZ(controllers, qb, -PI/2.0); + }, + "__quantum__qis__t__body" => { + let qb = parse_qubit(inst, 0); + graph.Z(qb, PI/4.0); + }, + "__quantum__qis__t__adj" => { + let qb = parse_qubit(inst, 0); + graph.Z(qb, -(PI/4.0)); + }, + "__quantum__qis__t__ctl" => { 
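+        // Controlled-T, mirroring the s__ctl arm above: controls come from
+        // operand 0 and the target qubit from operand 1, with an eighth-turn
+        // (PI/4) Z-arc.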
+        let controllers = parse_as_value(inst, 0).expect("Need control qubits.");
+        let qb = parse_qubit(inst, 1);
+        graph.CZ(controllers, qb, PI/4.0);
+      },
+      "__quantum__qis__t__ctladj" => {
+        let controllers = parse_as_value(inst, 0).expect("Need control qubits.");
+        let qb = parse_qubit(inst, 1);
+        graph.CZ(controllers, qb, -PI/4.0);
+      },
+      "__quantum__qis__x__body" => {
+        let qb = parse_qubit(inst, 0);
+        graph.X(qb, PI);
+      },
+      "__quantum__qis__x__adj" => {
+        let qb = parse_qubit(inst, 0);
+        graph.X(qb, -PI);
+      },
+      "__quantum__qis__x__ctl" => {
+        let control = parse_as_value(inst, 0).expect("Need control qubits.");
+        let target = parse_qubit(inst, 1);
+        graph.CX(control, target, PI);
+      }
+      "__quantum__qis__y__body" => {
+        let qb = parse_qubit(inst, 0);
+        graph.Y(qb, PI);
+      },
+      "__quantum__qis__y__adj" => {
+        let qb = parse_qubit(inst, 0);
+        graph.Y(qb, -PI);
+      },
+      "__quantum__qis__y__ctl" => {
+        let control = parse_as_value(inst, 0).expect("Need control qubits.");
+        let target = parse_qubit(inst, 1);
+        graph.CY(control, target, PI);
+      }
+      "__quantum__qis__z__body" => {
+        let qb = parse_qubit(inst, 0);
+        graph.Z(qb, PI);
+      },
+      "__quantum__qis__z__adj" => {
+        let qb = parse_qubit(inst, 0);
+        graph.Z(qb, -PI);
+      },
+      "__quantum__qis__z__ctl" => {
+        let control = parse_as_value(inst, 0).expect("Need control qubits.");
+        let target = parse_qubit(inst, 1);
+        graph.CZ(control, target, PI);
+      }
+      "__quantum__qis__cnot__body" => {
+        let control = parse_qubit(inst, 0);
+        let target = parse_qubit(inst, 1);
+        graph.CX(control, target, PI);
+      },
+      "__quantum__qis__rx__body" => {
+        let qubit = parse_as_value(inst, 1).expect("Can't find a qubit.");
+        let rotation = parse_as_value(inst, 0).expect("Can't find a rotation.");
+        graph.R(Value::Pauli(Pauli::X), qubit, rotation);
+      },
+      "__quantum__qis__rx__adj" => {
+        let qubit = parse_as_value(inst, 0).expect("Can't find a qubit.");
+        let rotation = parse_as_value(inst, 1).expect("Can't find a rotation.");
+        let throwaway = context.next_throwaway();
+
+        graph.Expression(Expression::NegateSign(rotation), Some(throwaway.clone()));
+        graph.R(
+          Value::Pauli(Pauli::X),
+          qubit,
+          Value::Ref(throwaway, None)
+        );
+      },
+      "__quantum__qis__rx__ctl" => {
+        let controls = parse_as_value(inst, 0).expect("Can't find controls.");
+        let (pauli, target, rotation) = expand_arg_tuple(1);
+        graph.CR(
+          Value::Pauli(Pauli::X),
+          controls,
+          Value::Ref(target, None),
+          Value::Ref(rotation, None)
+        );
+      },
+      "__quantum__qis__rx__ctladj" => {
+        let controls = parse_as_value(inst, 0).expect("Can't find controls.");
+        let (pauli, target, rotation) = expand_arg_tuple(1);
+        let throwaway = context.next_throwaway();
+
+        graph.Expression(Expression::NegateSign(Value::Ref(rotation, None)), Some(throwaway.clone()));
+        graph.CR(
+          Value::Pauli(Pauli::X),
+          controls,
+          Value::Ref(target, None),
+          Value::Ref(throwaway, None)
+        );
+      },
+      "__quantum__qis__ry__body" => {
+        let qubit = parse_as_value(inst, 1).expect("Can't find a qubit.");
+        let rotation = parse_as_value(inst, 0).expect("Can't find a rotation.");
+        graph.R(Value::Pauli(Pauli::Y), qubit, rotation);
+      },
+      "__quantum__qis__ry__adj" => {
+        let qubit = parse_as_value(inst, 0).expect("Can't find a qubit.");
+        let rotation = parse_as_value(inst, 1).expect("Can't find a rotation.");
+        let throwaway = context.next_throwaway();
+
+        graph.Expression(Expression::NegateSign(rotation), Some(throwaway.clone()));
+        graph.R(
+          Value::Pauli(Pauli::Y),
+          qubit,
+          Value::Ref(throwaway, None)
+        );
+      },
+      "__quantum__qis__ry__ctl" => {
+        let
controls = parse_as_value(inst, 0).expect("Can't find controls."); + let (pauli, target, rotation) = expand_arg_tuple(1); + graph.CR( + Value::Pauli(Pauli::Y), + controls, + Value::Ref(target, None), + Value::Ref(rotation, None) + ); + }, + "__quantum__qis__ry__ctladj" => { + let controls = parse_as_value(inst, 0).expect("Can't find controls."); + let (pauli, target, rotation) = expand_arg_tuple(1); + let throwaway = context.next_throwaway(); + + graph.Expression(Expression::NegateSign(Value::Ref(rotation, None)), Some(throwaway.clone())); + graph.CR( + Value::Pauli(Pauli::Y), + controls, + Value::Ref(target, None), + Value::Ref(throwaway, None) + ); + }, + "__quantum__qis__rz__body" => { + let qubit = parse_as_value(inst, 1).expect("Can't find a qubit."); + let rotation = parse_as_value(inst, 0).expect("Can't find a rotation."); + graph.R(Value::Pauli(Pauli::Z), qubit, rotation); + }, + "__quantum__qis__rz__adj" => { + let qubit = parse_as_value(inst, 0).expect("Can't find a qubit."); + let rotation = parse_as_value(inst, 1).expect("Can't find a rotation."); + let throwaway = context.next_throwaway(); + + graph.Expression(Expression::NegateSign(rotation), Some(throwaway.clone())); + graph.R( + Value::Pauli(Pauli::Z), + qubit, + Value::Ref(throwaway, None) + ); + }, + "__quantum__qis__rz__ctl" => { + let controls = parse_as_value(inst, 0).expect("Can't find controls."); + let (pauli, target, rotation) = expand_arg_tuple(1); + graph.CR( + Value::Pauli(Pauli::Z), + controls, + Value::Ref(target, None), + Value::Ref(rotation, None) + ); + }, + "__quantum__qis__rz__ctladj" => { + let controls = parse_as_value(inst, 0).expect("Can't find controls."); + let (pauli, target, rotation) = expand_arg_tuple(1); + let throwaway = context.next_throwaway(); + + graph.Expression(Expression::NegateSign(Value::Ref(rotation, None)), Some(throwaway.clone())); + graph.CR( + Value::Pauli(Pauli::Z), + controls, + Value::Ref(target, None), + Value::Ref(throwaway, None) + ); + }, + "__quantum__qis__measure__body" => { + let ref_id = get_ref_id_from_instruction(inst.borrow()); + let bases = parse_as_value(inst, 0).expect("Can't resolve measure basis."); + let qubits = parse_as_value(inst, 1).expect("Can't resolve measure qubits."); + + graph.Measure(bases, qubits,Value::String(ref_id)); + } + "__quantum__qis__m__body" | + "__quantum__qis__mz__body" => { + let target_value = if let Some(val) = parse_ref_id_from_instruction(inst.borrow()) { + Value::String(val) + } else { + parse_as_value(inst, 1).expect("Can't find result register.") + }; + + let qb = parse_qubit(inst, 0); + graph.Measure(Value::Pauli(Pauli::Z), qb, target_value); + }, + "__quantum__qis__cx__body" => { + let control = parse_qubit(inst, 0); + let target = parse_qubit(inst, 1); + graph.CR( + Value::Pauli(Pauli::X), + control, + target, + Value::Float(PI) + ); + }, + "__quantum__qis__cz__body" => { + let control = parse_qubit(inst, 0); + let target = parse_qubit(inst, 1); + graph.CR( + Value::Pauli(Pauli::Z), + control, + target, + Value::Float(PI) + ); + }, + "__quantum__qis__ccx__body" => { + let control_one = parse_qubit(inst, 0); + let control_two = parse_qubit(inst, 1); + let target = parse_qubit(inst, 2); + graph.CR( + Value::Pauli(Pauli::Z), + Value::Array(vec![Ptr::from(control_one), Ptr::from(control_two)]), + target, + Value::Float(PI) + ); + }, + + // Results/initialize + "__quantum__rt__initialize" => { + graph.Initialize(); + }, + "__quantum__rt__fail" => { + let message = parse_as_value(inst, 0); + graph.Throw(message); + } + 
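+      // Base-profile QIR surfaces results purely through record-output calls,
+      // so encountering one is taken as the signal to flip the flag below.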
"__quantum__rt__result_record_output" => { + // Base profiles only have one method, so don't need to care about child + // contexts. + if !context.is_base_profile.deref() { + with_mutable!(context.is_base_profile.expand_into(&Ptr::from(true))) + } + }, + "__quantum__rt__string_equal" | + "__quantum__rt__result_equal" => { + let ref_id = get_ref_id_from_instruction(inst.borrow()); + let left = parse_as_value(inst, 0).expect("Left comparison result unresolvable."); + let right = parse_as_value(inst, 1).expect("Right comparison result unresolvable."); + + graph.Condition(ref_id, left, Equalities::Equals, right); + }, + "__quantum__rt__result_get_one" => { + let ref_id = get_ref_id_from_instruction(inst.borrow()); + graph.Assign(ref_id, Value::Int(1)); + }, + "__quantum__rt__result_get_zero" => { + let ref_id = get_ref_id_from_instruction(inst.borrow()); + graph.Assign(ref_id, Value::Int(0)); + }, + "__quantum__rt__result_to_string" => { + let ref_id = get_ref_id_from_instruction(inst.borrow()); + let val = parse_as_value(inst, 0).expect("Can't resolve value."); + graph.Expression(Expression::Stringify(val), Some(ref_id)); + }, + + // Qubit operations + "__quantum__rt__qubit_allocate" => { + let ref_id = get_ref_id_from_instruction(inst.borrow()); + graph.ActivateQubit(ref_id, None); + }, + "__quantum__rt__qubit_allocate_array" => { + let qubit_numbers = parse_as_value(inst, 0).expect("Qubit array count unresolved."); + let ref_id = get_ref_id_from_instruction(inst.borrow()); + graph.ActivateQubit(ref_id, Some(qubit_numbers)); + }, + "__quantum__rt__qubit_release" | + "__quantum__rt__qubit_release_array" => { + let deactivated_qubit = parse_qubit(inst, 0); + graph.DeactivateQubit(deactivated_qubit); + }, + + // General utilities + "__quantum__rt__message" => { + let log_message = parse_as_value(inst, 0).expect("Can't find message value."); + graph.Log(log_message); + }, + + // Array operators. Hope these all go at some point. + "__quantum__rt__array_copy" => { + let ref_id = get_ref_id_from_instruction(inst.borrow()); + let copy_target = parse_as_value(inst, 0).expect("Should be a reference."); + graph.Expression(Expression::Clone(copy_target), Some(ref_id)); + }, + "__quantum__rt__array_create" | + "__quantum__rt__array_create_1d" => { + // We don't care about sizes, we dynamically allocate them anyway. + let ref_id = get_ref_id_from_instruction(inst.borrow()); + graph.Assign(ref_id, Value::Array(Vec::new())); + }, + "__quantum__rt__array_get_element_ptr" | + "__quantum__rt__array_get_element_ptr_1d" => { + let ref_id = get_ref_id_from_instruction(inst.borrow()); + let new_result: Vec> = Vec::new(); + + let target = parse_as_value(inst, 0) + .expect("Target of array access unresolvable.").as_reference(); + let index = parse_as_value(inst, 1).expect("Index unresolvable."); + + graph.Assign(ref_id.clone(), Value::Ref(target.0, Some(Ptr::from(index)))); + }, + "__quantum__rt__array_get_size" | + "__quantum__rt__array_get_size_1d" => { + let ref_id = get_ref_id_from_instruction(inst.borrow()); + let length_target = parse_as_value(inst, 0).expect("Should be a reference."); + graph.Expression(Expression::Length(length_target), Some(ref_id)); + }, + "__quantum__rt__callable_copy" => { + let ref_id = get_ref_id_from_instruction(inst.borrow()); + let val = parse_as_value(inst, 0) + .expect("Can't resolve value."); + + // No need to copy anything, we don't assign state. 
+ graph.Assign(ref_id, val); + }, + "__quantum__rt__callable_create" => { + let ref_id = get_ref_id_from_instruction(inst.borrow()); + let method = parse_as_value(inst, 0).expect("Can't find global callable array."); + let callable_lambda = if let Value::Ref(call, _) = method.borrow() { + parse_default_callable(call) + } else { None }; + + if let Some(lambda) = callable_lambda { + let stored = parse_as_value(inst, 2).expect("Can't find stored value."); + let mut subgraph = CallableAnalysisGraph::new(&lambda); + subgraph.argument_mappings.insert("%capture-tuple".to_string(), Ptr::from(stored)); + subgraph.argument_mappings.insert("%result-tuple".to_string(), Ptr::from(Value::Array(Vec::new()))); + graph.Assign(ref_id, Value::Callable(Ptr::from(subgraph))); + } else { + panic!("Unable to resolve callable initialization."); + } + }, + "__quantum__rt__callable_invoke" => { + let method = parse_as_value(inst, 0).expect("Can't find callable."); + let args= parse_as_value(inst, 1).expect("Can't find argument."); + let results = parse_as_value(inst, 2).expect("Can't find results."); + + let results = match results { + Value::Ref(ref_, _) => Some(ref_.clone()), + _ => None + }; + + graph.Expression( + Expression::ArgInjection( + method.clone(), + if args == Value::Empty { None } else { Some(args) }), + None); + + // Call a subgraph with our dynamic callable. + graph.Subgraph(method, results); + }, + "__quantum__rt__callable_make_adjoint" => { + let method = parse_as_value(inst, 0).expect("Can't find callable."); + graph.Expression(Expression::MakeCtrlAdj(method, LambdaModifier::Adj), None); + }, + "__quantum__rt__callable_make_controlled" => { + let method = parse_as_value(inst, 0).expect("Can't find callable."); + graph.Expression(Expression::MakeCtrlAdj(method, LambdaModifier::Ctl), None); + }, + + // We ignore alias counts, no need. + "__quantum__rt__callable_update_alias_count" | + "__quantum__rt__callable_update_reference_count" | + "__quantum__rt__capture_update_alias_count" | + "__quantum__rt__capture_update_reference_count" | + "__quantum__rt__array_update_alias_count" | + "__quantum__rt__array_update_reference_count" | + "__quantum__rt__result_update_reference_count" | + "__quantum__rt__string_update_reference_count" | + "__quantum__rt__tuple_update_alias_count" | + "__quantum__rt__bigint_update_reference_count" | + "__quantum__rt__tuple_update_reference_count" => { }, + + // All to-string operations are the same for us, just stringify the value. 
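+      // e.g. __quantum__rt__int_to_string over Value::Int(5) is recorded as
+      // Expression::Stringify(Int(5)) and only rendered (presumably to "5")
+      // when the graph is interpreted.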
+ "__quantum__rt__bool_to_string" | + "__quantum__rt__bigint_to_string" | + "__quantum__rt__double_to_string" | + "__quantum__rt__int_to_string" | + "__quantum__rt__pauli_to_string" | + "__quantum__rt__qubit_to_string" | + "__quantum__rt__range_to_string" => { + let ref_id = get_ref_id_from_instruction(inst.borrow()); + let val = parse_as_value(inst, 0) + .expect("Can't resolve value."); + graph.Expression(Expression::Stringify(val), Some(ref_id)); + }, + "__quantum__rt__string_concatenate" => { + let ref_id = get_ref_id_from_instruction(inst.borrow()); + let left_string = parse_as_value(inst, 0) + .expect("Can't resolve string value."); + let right_string = parse_as_value(inst, 1) + .expect("Can't resolve string value."); + + graph.Arithmatic(ref_id, left_string, Operator::Add, right_string); + }, + "__quantum__rt__string_create" => { + let ref_id = get_ref_id_from_instruction(inst.borrow()); + let created_string = parse_as_value(inst, 0) + .expect("Can't resolve string creation target."); + + graph.Assign(ref_id, created_string); + }, + "__quantum__rt__tuple_create" => { + let ref_id = get_ref_id_from_instruction(inst.borrow()); + graph.Assign(ref_id, Value::Array(Vec::new())); + }, + + // Bigint support that hopefully we'll just be able to ignore. + "__quantum__rt__bigint_add" | + "__quantum__rt__bigint_bitand" | + "__quantum__rt__bigint_bitnot" | + "__quantum__rt__bigint_bitor" | + "__quantum__rt__bigint_bitxor" | + "__quantum__rt__bigint_create_array" | + "__quantum__rt__bigint_create_i64" | + "__quantum__rt__bigint_divide" | + "__quantum__rt__bigint_equal" | + "__quantum__rt__bigint_get_data" | + "__quantum__rt__bigint_get_length" | + "__quantum__rt__bigint_greater" | + "__quantum__rt__bigint_greater_eq" | + "__quantum__rt__bigint_modulus" | + "__quantum__rt__bigint_multiply" | + "__quantum__rt__bigint_negate" | + "__quantum__rt__bigint_power" | + "__quantum__rt__bigint_shiftleft" | + "__quantum__rt__bigint_shiftright" | + "__quantum__rt__bigint_subtract" | + "__quantum__rt__array_project" | + "__quantum__rt__array_slice" | + "__quantum__rt__array_slice_1d" | + "__quantum__rt__array_get_dim" | + "__quantum__rt__array_concatenate" | + "__quantum__rt__tuple_record_output" | + "__quantum__rt__array_record_output" | + "__quantum__rt__string_get_data" | + "__quantum__rt__string_get_length" | + "__quantum__rt__tuple_copy" | + _ => { warn!("Attempted to process unknown intrinsic {}.", name)} + } + + None + } + + fn eval_call(&self, inst: &Ptr, graph: &Ptr, context: &Ptr) { + let method_name = self.get_method_name(inst.borrow()).expect("Can't resolve method name of call operation."); + let called_func = context.module.get_function(method_name.as_str()); + if called_func.is_none() || called_func.unwrap().get_basic_blocks().len() == 0 { + self.eval_intrinsic(method_name, inst, graph, context); + } else { + let func = called_func.unwrap(); + + let mut args = HashMap::new(); + let mut index = 0; + let loops = inst.get_num_operands() -1; + while index < loops { + let param = func.get_nth_param(index).unwrap().to_string(); + let param_ref_id = get_ref_id_from_value(param.clone()); + let value = self.as_value_ptr(operand_to_value!(inst, index.clone()), graph, context).expect("Unable to resolve value."); + args.insert(param_ref_id, value); + index += 1; + } + + let builder = self.walk_function(func.borrow(), context); + let mut subgraph = + Ptr::from(CallableAnalysisGraph::new(&builder.graph)); + + // Add specific args to this particular call. 
+ subgraph.argument_mappings = args; + + let target_var = parse_ref_id_from_instruction(inst); + graph.Subgraph(Value::Callable(subgraph), target_var); + } + } + + fn eval_int_to_ptr(&self, inst: &Ptr, graph: &Ptr, context: &Ptr) { + let val = self.as_value(inst.as_any_value_enum().borrow_mut(), graph, context).expect("Int to pointer unresolvable."); + let ref_id = get_ref_id_from_instruction(inst.borrow()); + graph.Assign(ref_id, val); + } + + /// We implicitly convert types on use, so as long as they aren't wildly different no need for static casts. + fn eval_numeric_cast(&self, inst: &Ptr, graph: &Ptr, context: &Ptr) { + let val = self.as_value(operand_to_value!(inst, 0), graph, context).expect("Int to pointer unresolvable."); + let ref_id = get_ref_id_from_instruction(inst.borrow()); + graph.Assign(ref_id, val); + } + + fn eval_bitcast(&self, inst: &Ptr, graph: &Ptr, context: &Ptr) { + let val = self.as_value(operand_to_value!(inst, 0), graph, context).expect("Bitcast value unresolvable."); + let ref_id = get_ref_id_from_instruction(inst.borrow()); + graph.Assign(ref_id, val); + } + + fn eval_trunc(&self, inst: &Ptr, graph: &Ptr, context: &Ptr) { + let val = self.as_value(operand_to_value!(inst, 0), graph, context).expect("Truncate value unresolvable."); + let ref_id = get_ref_id_from_instruction(inst.borrow()); + graph.Assign(ref_id, val); + } + + /// Load is meaningless for us, as is alignment and memory metadata. Just treat it as an assign. + fn eval_load(&self, inst: &Ptr, graph: &Ptr, context: &Ptr) { + let val = self.as_value(operand_to_value!(inst, 0), graph, context).expect("Load value unresolvable."); + let ref_id = get_ref_id_from_instruction(inst.borrow()); + graph.Assign(ref_id, val); + } + + fn eval_store(&self, inst: &Ptr, graph: &Ptr, context: &Ptr) { + let value = self.as_value(operand_to_value!(inst, 0), graph, context).expect("Store value unresolvable."); + + // The argument resolves to another variable which we want to just directly assign too. + let target_variable_str = get_ref_id_from_instruction(inst.get_operand(1).unwrap().left().unwrap().as_instruction_value().expect("Has to be storing in another variable.").borrow()); + graph.Assign(target_variable_str, value); + } + + fn eval_branch(&self, inst: &Ptr, graph: &Ptr, + context: &Ptr) { + let op_count = inst.get_num_operands(); + let branch_basic_block = inst.get_parent().unwrap().get_name().to_str().unwrap().to_string(); + let last_node = with_mutable!(graph.auto_attach_target.borrow_mut()); + + // Walks the branches outwards-going edge to work out what values the phi node would have + // assigned, and then turn those into our own edge assignments instead. + // + // This means phi nodes don't really have any evaluation, because all branches will be + // dealing with the conditional themselves. + let get_assignment = |bb: &BasicBlock| -> Option> { + let mut results = Vec::new(); + let mut potential_phi = bb.get_first_instruction(); + let mut is_phi = true; + while is_phi { + if let Some(phi) = potential_phi { + match phi.get_opcode() { + InstructionOpcode::Phi => { + let inst_string = phi.to_string(); + + // Do a dirty match to find the basic block names. + let bb_finder = Regex::new(", %([^\\]]+?)\\]+").unwrap(); + let capture_groups: Vec = bb_finder.captures_iter( + inst_string.as_str()).map(|val| val.get(1).unwrap().as_str().trim().to_string()).collect(); + + // The value in the operand is the instruction linking to the value that gets + // assigned if we're coming from a particular basic-block. 
So find the assignment + // that is for the branch we're currently looking at and return it. + let ref_id = get_ref_id_from_instruction(phi.borrow()); + let operands = phi.get_num_operands(); + let mut i = 0; + while i < operands { + let basic_block = capture_groups.get(i as usize).expect("Can't find the name of the basic block.").clone(); + if basic_block == branch_basic_block { + let val = self.as_value(operand_to_value!(phi, i), graph, context).expect("Can't resolve phi node references."); + results.push((ref_id, val.clone())); + break; + } + i += 1; + } + } + _ => { + is_phi = false; + } + }; + + potential_phi = phi.get_next_instruction(); + } else { + is_phi = false; + } + } + + if results.is_empty() { + None + } else { + Some(results) + } + }; + + // Unconditional. + if op_count == 1 { + let basic_block = operand_to_bb!(inst, 0); + let target = basic_block.get_name().to_str().unwrap().to_string(); + let target = with_mutable!(context.anchors.get_mut(target.as_str()).expect("Node should exist.")); + let assignments = get_assignment(basic_block.borrow()); + with_mutable!(graph.add_edge(last_node.borrow_mut(), target, assignments, None)); + } else { + // Conditions 'seem' to always be a reference to another result, this just casts it to a bool. + // But can't discount just having a flat true/false value. + let condition = self.as_value( + operand_to_value!(inst, 0), + graph, + context).expect("Conditional unable to be evaluated for branch."); + let false_block = operand_to_bb!(inst, 1); + let true_block = operand_to_bb!(inst, 2); + + let true_name = true_block.get_name().to_str().unwrap(); + let false_name = false_block.get_name().to_str().unwrap(); + + let true_branch = with_mutable!(context.anchors.get_mut(true_name).expect("Should exist.")); + let false_branch = with_mutable!(context.anchors.get_mut(false_name).expect("Should exist.")); + + let true_assignments = get_assignment(true_block.borrow()); + let false_assignments = get_assignment(false_block.borrow()); + + // We model branches as a conditional outwards edge if the condition is true, otherwise unconditional out. + // All edge conditions should be evaluated before the unconditional, as it acts as a fall-back. 
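+    // So `br i1 %cond, label %then, label %else` becomes two out-edges from
+    // the current node: one guarded by (%cond == true) into %then, and an
+    // unguarded fall-back edge into %else.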
+ with_mutable!(graph.add_edge(last_node.borrow_mut(), true_branch.borrow_mut(), true_assignments, + Some(Condition::new(condition, Equalities::Equals, Value::Bool(true))))); + with_mutable!(graph.add_edge(last_node.borrow_mut(), false_branch.borrow_mut(), false_assignments, None)); + } + } + + fn eval_icmp(&self, inst: &Ptr, graph: &Ptr, + context: &Ptr) { + let operator = if let Some(pred) = inst.get_fcmp_predicate() { + match pred { + FloatPredicate::OEQ => Equalities::Equals, + FloatPredicate::ONE => Equalities::NotEquals, + FloatPredicate::UGT => Equalities::GreaterThan, + FloatPredicate::UGE => Equalities::GreaterOrEqualThan, + FloatPredicate::ULT => Equalities::LessThan, + FloatPredicate::ULE => Equalities::LessOrEqualThan, + FloatPredicate::OGT => Equalities::GreaterThan, + FloatPredicate::OGE => Equalities::GreaterOrEqualThan, + FloatPredicate::OLT => Equalities::LessThan, + FloatPredicate::OLE => Equalities::LessOrEqualThan, + _ => panic!("Untranslatable float comparison.") + } + } else if let Some(pred) = inst.get_icmp_predicate() { + match pred { + IntPredicate::EQ => Equalities::Equals, + IntPredicate::NE => Equalities::NotEquals, + IntPredicate::UGT => Equalities::GreaterThan, + IntPredicate::UGE => Equalities::GreaterOrEqualThan, + IntPredicate::ULT => Equalities::LessThan, + IntPredicate::ULE => Equalities::LessOrEqualThan, + IntPredicate::SGT => Equalities::GreaterThan, + IntPredicate::SGE => Equalities::GreaterOrEqualThan, + IntPredicate::SLT => Equalities::LessThan, + IntPredicate::SLE => Equalities::LessOrEqualThan, + } + } else { + panic!("Unrecognized comparison operator.") + }; + + let left = self.as_value( + operand_to_value!(inst, 0), graph, context) + .expect("Can't resolve left side of icmp."); + + let right = self.as_value( + operand_to_value!(inst, 1), graph, context) + .expect("Can't resolve right side of icmp."); + + let ref_id = get_ref_id_from_instruction(inst.borrow()); + graph.Condition(ref_id, left, operator, right); + } + + fn add_arithmatic_op(&self, op: Operator, inst: &Ptr, graph: &Ptr, + context: &Ptr) { + let lhs = self.as_value( + operand_to_value!(inst, 0), graph, context) + .expect(format!("Can't resolve left side of {}.", op.to_string()).as_str()); + + let rhs = self.as_value( + operand_to_value!(inst, 1), graph, context) + .expect(format!("Can't resolve right side of {}.", op.to_string()).as_str()); + + let ref_id = get_ref_id_from_instruction(inst.borrow()); + graph.Arithmatic(ref_id, lhs, op, rhs); + } + + fn eval_mul(&self, inst: &Ptr, graph: &Ptr, + context: &Ptr) { + self.add_arithmatic_op(Operator::Multiply, inst, graph, context); + } + + fn eval_div(&self, inst: &Ptr, graph: &Ptr, + context: &Ptr) { + self.add_arithmatic_op(Operator::Divide, inst, graph, context); + } + + fn eval_sub(&self, inst: &Ptr, graph: &Ptr, + context: &Ptr) { + self.add_arithmatic_op(Operator::Subtract, inst, graph, context); + } + + fn eval_add(&self, inst: &Ptr, graph: &Ptr, + context: &Ptr) { + self.add_arithmatic_op(Operator::Add, inst, graph, context); + } + + fn eval_insertvalue(&self, inst: &Ptr, graph: &Ptr, + context: &Ptr) { + // TODO: Don't double-up stringification from get_ref_x.
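+ // Hedged example of the shape handled here (IR names assumed): for + // %r = insertvalue {i64, i64} %agg, i64 %v, 1 + // we emit %r = %agg, then pull out element 1 through a throwaway indexer + // reference and assign %v into it, mirroring the loop below.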
+ let inst_str = inst.to_string(); + let inst_str = inst_str.trim_matches('"').trim(); + + let mut target_ref = get_ref_id_from_instruction(inst); + let target_composite = self.as_value(operand_to_value!(inst, 0), graph, context) + .expect("Can't resolve composite to insert into."); + + let insert_value = self.as_value(operand_to_value!(inst, 1), graph, context) + .expect("Can't resolve value to insert."); + + // TODO: Have to extract indexers via regex/string comparison because they're not exposed + // as an operand for some reason. + let mut index_values = Vec::new(); + for indexer in inst_str.split(',').rev() { + let indexer = indexer.trim(); + if Regex::new("^[0-9]+$").unwrap().is_match(indexer) { + index_values.push(indexer.parse::().expect(format!("Unable to parse {} as an int", indexer).as_str())); + } + } + index_values.reverse(); + + let mut throwaway_var = context.next_throwaway(); + + // Assign our referenced/new object to the target variable. + graph.Assign(target_ref.clone(), target_composite); + + // Pull out the element we want to change with an indexer reference. + for index in index_values { + graph.Assign(throwaway_var.clone(), Value::Ref(target_ref, Some(Ptr::from(Value::Int(index))))); + target_ref = throwaway_var; + throwaway_var = context.next_throwaway(); + } + + // Directly change that element with the value we want to insert. + graph.Assign(target_ref, insert_value); + } + + fn eval_extractvalue(&self, inst: &Ptr, graph: &Ptr, + context: &Ptr) { + // TODO: Don't double-up stringification from get_ref_x. + let inst_str = inst.to_string(); + let inst_str = inst_str.trim_matches('"').trim(); + + let target_ref = get_ref_id_from_instruction(inst); + let target_composite = self.as_value(operand_to_value!(inst, 0), graph, context) + .expect("Can't resolve composite to extract from."); + + // TODO: Same as insertvalue, find way around this. + let mut index_values = Vec::new(); + for indexer in inst_str.split(',').rev() { + let indexer = indexer.trim(); + if Regex::new("^[0-9]+$").unwrap().is_match(indexer) { + index_values.push(indexer.parse::().expect(format!("Unable to parse {} as an int", indexer).as_str())); + } + } + index_values.reverse(); + + let mut throwaway_var = context.next_throwaway(); + graph.Assign(throwaway_var.clone(), target_composite); + + // Pull out the element we want to change with an indexer reference. + for index in index_values { + let next_throwaway = context.next_throwaway(); + graph.Assign(next_throwaway.clone(), Value::Ref(throwaway_var, Some(Ptr::from(Value::Int(index))))); + throwaway_var = next_throwaway; + } + + // Directly extract from our composite the object we want. + graph.Assign(target_ref.clone(), Value::Ref(throwaway_var, None)); + } + + /// The GEP instruction is special in that it only deals with pointer addresses, nothing more. + /// This becomes very simple for us because address == the object itself in our model of the + /// world, so we just chain indexer operations repeatedly on the same object and let the + /// runtime resolve the type nuances. 
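+ /// As a sketch (operand shapes assumed): `%p = getelementptr %t, %base, i64 0, i32 2` + /// simply becomes the chained indexer assignment `%p = %base[0][2]`, with type + /// resolution deferred until the graph is executed.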
+ fn eval_getelementptr(&self, inst: &Ptr, graph: &Ptr, context: &Ptr) { + let target_ref = get_ref_id_from_instruction(inst); + let extracted_ref = self.extract_gep(inst.as_any_value_enum().borrow(), graph, context) + .expect("Couldn't extract getelementptr instruction."); + + graph.Assign(target_ref.clone(), extracted_ref); + } + + fn eval_select(&self, inst: &Ptr, graph: &Ptr, context: &Ptr) { + let target_ref = get_ref_id_from_instruction(inst); + let condition = self.as_value(operand_to_value!(inst, 0), graph, context) + .expect("Couldn't get target."); + + let true_value = self.as_value(operand_to_value!(inst, 1), graph, context) + .expect("Couldn't get true select value."); + + let false_value = self.as_value(operand_to_value!(inst, 2), graph, context) + .expect("Couldn't get false select value."); + + // We just use edge-assignments to simulate the select. So we have two edges that connect these two nodes - one which has a + // condition, the other which is the default path, assigning correctly along each. + let mut last_node = graph.next_auto_attach().clone(); + let attach_node = Ptr::from(Instruction::NoOp); + let mut added_node = with_mutable!(graph.add_with_edge( + &attach_node, &mut last_node, Some(vec![(target_ref.clone(), false_value)]), None)); + + with_mutable!(graph.add_edge(&mut last_node, &mut added_node, Some(vec![(target_ref, true_value)]), + Some(Condition::new(condition, Equalities::Equals, Value::Bool(true))))); + } + + fn eval_bitwise(&self, op: Operator, inst: &Ptr, + graph: &Ptr, context: &Ptr) { + let target_ref = get_ref_id_from_instruction(inst); + let lhs = self.as_value(operand_to_value!(inst, 0), graph, context) + .expect("Couldn't get true select value."); + + let rhs = self.as_value(operand_to_value!(inst, 1), graph, context) + .expect("Couldn't get false select value."); + + graph.Arithmatic(target_ref, lhs, op, rhs); + } + + fn eval_or(&self, inst: &Ptr, graph: &Ptr, context: &Ptr) { + self.eval_bitwise(Operator::Or, inst, graph, context); + } + + fn eval_and(&self, inst: &Ptr, graph: &Ptr, context: &Ptr) { + self.eval_bitwise(Operator::And, inst, graph, context); + } + + fn eval_xor(&self, inst: &Ptr, graph: &Ptr, context: &Ptr) { + self.eval_bitwise(Operator::Xor, inst, graph, context); + } + + fn eval_ret(&self, inst: &Ptr, graph: &Ptr, context: &Ptr) { + if inst.get_num_operands() == 1 { + let results= + self.as_value(operand_to_value!(inst, 0), graph, context) + .expect("Can't resolve result."); + + graph.Return(results); + } + } + + fn eval_fneg(&self, inst: &Ptr, graph: &Ptr, context: &Ptr) { + let target = get_ref_id_from_instruction(inst); + let value = self.as_value( + operand_to_value!(inst, 0), graph, context) + .expect("Can't resolve float for sign-flip."); + + graph.Expression(Expression::NegateSign(value), Some(target)); + } +} diff --git a/src/munchkin/pykin/src/execution.rs b/src/munchkin/pykin/src/execution.rs new file mode 100644 index 0000000..b01eada --- /dev/null +++ b/src/munchkin/pykin/src/execution.rs @@ -0,0 +1,191 @@ +#![deny(clippy::all, clippy::pedantic)] + +use inkwell::{ + context::Context, + memory_buffer::MemoryBuffer, + module::Module, + OptimizationLevel, + passes::{PassManager, PassManagerBuilder}, + targets::{InitializationConfig, Target}, +}; +use std::{ffi::OsStr, path::Path}; +use std::borrow::{Borrow}; +use inkwell::values::FunctionValue; +use inkwell::attributes::AttributeLoc; +use crate::builders::PythonEngine; +use crate::evaluator::QIREvaluator; +use crate::graphs::ExecutableAnalysisGraph; +use 
crate::instructions::Value; +use crate::runtime::{ActiveTracers, QuantumRuntime, TracingModule}; +use crate::smart_pointers::Ptr; + +pub fn run_file(path: impl AsRef, args: &Vec, engine: &Ptr, + entry_point: Option<&str>, tracer: ActiveTracers) -> Result>, String> { + run_graph(&parse_file(path, entry_point)?, args, engine, tracer) +} + +pub fn parse_file(path: impl AsRef, entry_point: Option<&str>) -> Result, String> { + let context = Context::create(); + let module = file_to_module(path, &context)?; + build_graph_from_module(&module, entry_point) +} + +pub fn file_to_module(path: impl AsRef, context: &Context) -> Result { + let path = path.as_ref(); + let extension = path.extension().and_then(OsStr::to_str); + + match extension { + Some("ll") => MemoryBuffer::create_from_file(path) + .and_then(|buffer| context.create_module_from_ir(buffer)) + .map_err(|e| e.to_string()), + Some("bc") => Module::parse_bitcode_from_path(path, context).map_err(|e| e.to_string()), + _ => Err(format!("Unsupported file extension '{:?}'.", extension)), + } +} + +pub fn build_graph_from_module(module: &Module, entry_point: Option<&str>) -> Result, String> { + module.verify() + .map_err(|e| format!("Failed to verify module: {}", e.to_string()))?; + + let pass_manager_builder = PassManagerBuilder::create(); + pass_manager_builder.set_optimization_level(OptimizationLevel::None); + + let fpm = PassManager::create(()); + fpm.add_global_dce_pass(); + fpm.add_strip_dead_prototypes_pass(); + pass_manager_builder.populate_module_pass_manager(&fpm); + fpm.run_on(module); + + Target::initialize_native(&InitializationConfig::default())?; + inkwell::support::load_library_permanently(&Path::new("")); + + let evaluator = QIREvaluator::new(); + evaluator.evaluate( + &choose_entry_point(module_functions(module.borrow()), entry_point)?, + &Ptr::from(module)) +} + +pub fn run_graph(graph: &Ptr, arguments: &Vec, engine: &Ptr, tracer: ActiveTracers) -> Result>, String> { + let engines = Ptr::from(EngineCollection::from(engine)); + let mut runtime = QuantumRuntime::new(engines.borrow(), tracer); + runtime.execute(graph.borrow(), arguments) +} + +/// Top-level collection item that holds information about target runtimes and engines for graphs. +pub struct EngineCollection { + python_engine: Ptr +} + +/// We don't have a 'new' because later on this will be a proper collection, but will have a +/// helper for creating from a single engine instance. +impl EngineCollection { + pub fn from(python_engine: &Ptr) -> EngineCollection { + EngineCollection {python_engine: python_engine.clone()} + } + + pub fn get_available_QPU(&self) -> Ptr { + self.python_engine.clone() + } +} + +impl Default for EngineCollection { + fn default() -> Self { + EngineCollection { python_engine: Ptr::default() } + } +} + +/// Returns all functions from a module. +pub fn module_functions<'ctx>(module: &Module<'ctx>) -> impl Iterator> { + struct FunctionValueIter<'ctx>(Option>); + + impl<'ctx> Iterator for FunctionValueIter<'ctx> { + type Item = FunctionValue<'ctx>; + + fn next(&mut self) -> Option { + let function = self.0; + self.0 = function.and_then(FunctionValue::get_next_function); + function + } + } + + FunctionValueIter(module.get_first_function()) +} + +/// Checks if this function is a QIR entry-point. 
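+/// Both attribute spellings are matched since toolchains differ; e.g. (IR sketch, +/// attribute group assumed): `attributes #0 = { "entry_point" ... }` or `"EntryPoint"`.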
+pub fn is_entry_point(function: FunctionValue) -> bool { + function + .get_string_attribute(AttributeLoc::Function, "entry_point") + .is_some() + || function + .get_string_attribute(AttributeLoc::Function, "EntryPoint") + .is_some() +} + +/// Looks through the available entry-points and either picks the method that matches the +/// passed-in name, or auto-detects the single entry-point if no name is given. +pub fn choose_entry_point<'ctx>( + functions: impl Iterator>, + name: Option<&str>, +) -> Result, String> { + if name.is_some() { + functions.filter(|f| name.unwrap() == f.get_name().to_str().unwrap()).next().ok_or("Can't find a method with this name.".to_string()) + } else { + let eps: Vec = functions.filter(|f| is_entry_point(*f)).collect(); + if eps.is_empty() { + return Err("Can't find any entry-points.".to_string()); + } + + if eps.len() > 1 { + return Err("No specified method and more than one entry-point. Can't auto-detect.".to_string()); + } + Ok(*(eps.first().unwrap())) + } +} + +#[cfg(test)] +mod tests { + use std::borrow::Borrow; + use std::fs::canonicalize; + use bitflags::Flags; + use crate::builders::{PythonEngine}; + use crate::execution::run_file; + use crate::instructions::Value; + use crate::runtime::ActiveTracers; + use crate::smart_pointers::Ptr; + + #[test] + fn execute_qaoa() { + let relative_path = canonicalize("../tests/qsharp/qaoa/qir/qaoa.ll").unwrap(); + let path = relative_path.to_str().unwrap(); + + let py_builder = Ptr::from(PythonEngine::default()); + run_file(path, &Vec::new(), py_builder.borrow(), None, ActiveTracers::empty()); + } + + #[test] + fn execute_simplified_oracle_generator() { + let relative_path = canonicalize("../tests/qsharp/simplified-oracle-generator/qir/simplified-oracle-generator.ll").unwrap(); + let path = relative_path.to_str().unwrap(); + + let py_builder = Ptr::from(PythonEngine::default()); + run_file(path, &Vec::new(), py_builder.borrow(), None, ActiveTracers::empty()); + } + + #[test] + fn execute_oracle_generator() { + let relative_path = canonicalize("../tests/qsharp/oracle-generator/qir/oracle-generator.ll").unwrap(); + let path = relative_path.to_str().unwrap(); + + let py_builder = Ptr::from(PythonEngine::default()); + run_file(path, &Vec::new(), py_builder.borrow(), None, ActiveTracers::empty()); + } + + #[test] + fn execute_minified_oracle_generator() { + let relative_path = canonicalize("../tests/qsharp/minified-oracle-generator/qir/minified-oracle-generator.ll").unwrap(); + let path = relative_path.to_str().unwrap(); + + let py_builder = Ptr::from(PythonEngine::default()); + run_file(path, &vec![Value::Bool(true)], py_builder.borrow(), None, ActiveTracers::Graphs); + } +} diff --git a/src/munchkin/pykin/src/graphs.rs b/src/munchkin/pykin/src/graphs.rs new file mode 100644 index 0000000..d51c809 --- /dev/null +++ b/src/munchkin/pykin/src/graphs.rs @@ -0,0 +1,998 @@ +use std::borrow::{Borrow, BorrowMut}; +use std::collections::{HashMap, HashSet, VecDeque}; +use std::fmt::{Display, Formatter}; +use std::ops::{Deref, DerefMut}; +use crate::{with_mutable, with_mutable_self}; +use crate::instructions::{Condition, Equalities, Expression, Gate, GateBuilder, Instruction, InstructionBuilder, Operator, Value}; +use crate::runtime::RuntimeContext; +use crate::smart_pointers::*; + +/// Walks the graph from its entry-point to its logical conclusion. Will take all pathways exactly +/// once. Walks a pathway until it finds an intersection/phi node then reverses and takes the path +/// not taken for that particular branch.
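+/// For example (shape assumed): given entry A branching to B and C, with B and C +/// rejoining at D, the walk yields A, flips between the B and C pathways, and only +/// yields the merge node D once both incoming branches have been traversed.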
+/// +/// This means it will reverse in isolated branching, but if you have branches that never intersect +/// they will only be walked after the first one has been entirely traversed. This includes if all +/// pathways only intersect on the exit node. +pub fn walk_logical_paths(graph: &Ptr) -> LogicalPathwayIterator { + LogicalPathwayIterator::new(graph) +} + +pub struct LogicalPathwayIterator { + graph: Ptr, + guard: HashSet, + next_node: VecDeque> +} + +/// Walks the graph top-down taking all branches as it goes. Not a flat walk: as it flip-flops +/// between branches, any pathways that are heavily weighted on one side will be completed +/// later, sometimes exceptionally so. +impl LogicalPathwayIterator { + fn new(graph: &Ptr) -> LogicalPathwayIterator { + let mut vec = VecDeque::new(); + vec.append(VecDeque::from(graph.entry_points().iter().map(|val| val.clone()).collect::>>()).borrow_mut()); + LogicalPathwayIterator { graph: graph.clone(), guard: Default::default(), next_node: vec } + } + + /// Note this will also report empty after all pathways have been walked, + /// so it works for both 'is consumed' and 'is empty' checks. + pub fn is_empty(&self) -> bool { + self.next_node.is_empty() + } +} + +impl Iterator for LogicalPathwayIterator { + type Item = Ptr; + + fn next(&mut self) -> Option { + if self.next_node.is_empty() { + return None; + } + + let mut current_node = self.next_node.pop_back().expect("Can't be empty."); + while self.guard.contains(&current_node.id()) { + if let Some(potential_node) = self.next_node.pop_back() { + current_node = potential_node; + } else { + return None; + } + } + + // If we have a phi node then skip executing it until all its branches have also been + // evaluated. It will eternally be pushed back down the queue until every incoming + // branch has been traversed. + let mut phis = Vec::new(); + let inc_nodes = current_node.incoming_nodes(); + if inc_nodes.len() > 1 { + for (_, node) in inc_nodes.iter() { + if !self.guard.contains(&node.id()) { + phis.push(current_node.clone()); + } + } + + if !phis.is_empty() { + for phi in phis { + self.next_node.push_back(phi); + } + self.next_node.push_front(current_node.clone()); + current_node = self.next_node.pop_back().expect("Can't be empty."); + } + } + + self.guard.insert(current_node.id()); + + // We want to analyze conditional pathways first, so any non-conditional we just defer. + let mut uncond_next = None; + for edge in current_node.edges().outgoing.iter() { + // If our edge doesn't exist in the graph, just skip.
+ let node = self.graph.find_node(edge.end); + if node.is_none() { + continue; + } + + let node = node.unwrap(); + if edge.is_unconditional() { + uncond_next = Some(node); + } else { + self.next_node.push_back(node.clone()); + } + } + + if uncond_next.is_some() { + self.next_node.push_back(uncond_next.unwrap().clone()); + } + + Some(current_node.clone()) + } +} + +pub struct Edges { + pub incoming: Vec>, + pub outgoing: Vec>, +} + +impl Edges { + pub fn new() -> Edges { + Edges { incoming: Vec::new(), outgoing: Vec::new() } + } + + pub fn has_unconditional_out(&self) -> bool { + self.outgoing.iter().any(|val| val.conditions.is_none()) + } + + pub fn unconditional_out(&self) -> Option<&Ptr> { + self.outgoing.iter().filter(|val| val.conditions.is_none()).next() + } + + pub fn has_unconditional_in(&self) -> bool { + self.incoming.iter().any(|val| val.conditions.is_none()) + } + + pub fn unconditional_in(&self) -> Option<&Ptr> { + self.incoming.iter().filter(|val| val.conditions.is_none()).next() + } +} + +impl Display for Edges { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let inc = self.incoming.iter().map(|val| val.start.to_string()).collect::>().join(", "); + let out = self.outgoing.iter().map(|val| val.end.to_string()).collect::>().join(", "); + f.write_str(format!("({})<->({})", inc, out).as_str()) + } +} + +pub struct AnalysisGraph { + pub identity: String, + + nodes: Ptr>>, + edges: Ptr>>, + + pub auto_attach_target: Ptr +} + +impl AnalysisGraph { + pub fn new(id: String) -> AnalysisGraph { + AnalysisGraph { + identity: id, + edges: Ptr::from(HashMap::default()), + nodes: Ptr::from(HashMap::default()), + auto_attach_target: Ptr::default() + } + } + + pub fn is_empty(&self) -> bool { + self.nodes.len() == 0 + } + + pub fn nodes(&self) -> Vec> { + self.nodes.values().map(|val| val.clone()).collect() + } + + pub fn edges(&self) -> Vec> { + self.edges.values().map(|val| val.clone()).collect() + } + + /// Returns all entry-points into the graph, so every node that has no natural incoming edge. + pub fn entry_points(&self) -> Vec> { + self.nodes.values().into_iter().filter(|val| val.is_entry_node()).map(|val| val.clone()).collect() + } + + /// Returns all exit-points of the graph, so every node that has no natural outgoing edge. + pub fn exit_points(&self) -> Vec> { + self.nodes.values().into_iter().filter(|val| val.is_exit_node()).map(|val| val.clone()).collect() + } + + /// Adds an edge between start and end nodes. Will throw if attempting to add unconditional + /// edges when the current nodes already have some assigned. + pub fn add_edge(&mut self, start: &Ptr, end: &Ptr, + assignments: Option>, + conditions: Option) { + let conjoining_edge = Ptr::from(Edge::new_with_metadata(start.id(), end.id(), assignments, conditions)); + let start_edges = self.edges_of_mut(start.id()); + if conjoining_edge.conditions.is_none() && start_edges.has_unconditional_out() { + panic!("Tried to add unconditional edge to target that already has one. This will leave an orphaned node. Start [{}], end [{}]", start.to_string(), end.to_string()) + } + + start_edges.outgoing.push(conjoining_edge.clone()); + let end_edges = self.edges_of_mut(end.id()); + end_edges.incoming.push(conjoining_edge.clone()); + } + + /// Attaches an edge from the target to the newly-inserted node.
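+ /// A minimal usage sketch (names assumed): `graph.add_with_edge(&inst, &tail, None, None)` + /// inserts a fresh node for `inst` and wires `tail -> inst` with the given metadata.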
+ pub fn add_with_edge(&mut self, inst: &Ptr, target: &Ptr, + assignments: Option>, conditions: Option) -> Ptr { + let new_node = Ptr::from(Node::new(inst)); + self.add_node_with_edge(new_node.borrow(), false); + self.add_edge(target.borrow(), new_node.borrow(), assignments, conditions); + new_node + } + + pub fn edges_of_mut(&mut self, node_id: usize) -> &mut Ptr{ + if !self.edges.contains_key(&node_id) { + self.edges.insert(node_id, Ptr::from(Edges::new())); + } + + self.edges.get_mut(&node_id).unwrap() + } + + pub fn edges_of(&self, node_id: usize) -> &Ptr{ + if !self.edges.contains_key(&node_id) { + with_mutable_self!(self.edges.insert(node_id, Ptr::from(Edges::new()))); + } + + self.edges.get(&node_id).unwrap() + } + + /// Adds this node to the graph, assigning it as the next auto-attach target. If you want + /// an addition without the attach, look at add_loose. + /// + /// While this node always gets attached as the next aa-target, you can choose whether to add + /// an unconditional edge between the previous target and the new node via add_attached_edge. + /// You may not want that edge in situations where you're handling edge attachment by + /// other means. + pub fn add_node_with_edge(&mut self, node: &Ptr, add_attached_edge: bool) { + self.add_loose_node(node); + + if Ptr::is_not_null(&self.auto_attach_target) && add_attached_edge { + let val = self.auto_attach_target.clone(); + self.add_edge(val.borrow(), node.borrow(), None, None); + } + + self.auto_attach_target = node.clone(); + } + + /// Finds the node associated with this id. + pub fn find_node(&self, id: usize) -> Option<&Ptr> { + self.nodes.get(&id) + } + + /// Removes the next auto-attach target. + pub fn unattach(&mut self) { + self.auto_attach_target = Ptr::default(); + } + + /// Attaches the passed-in node to the current graph's auto-attach target and continues. + pub fn reattach(&mut self, node: &mut Ptr) { + self.add_node_with_edge(node, true); + } + + pub fn set_next_auto_attach(&mut self, node: &Ptr){ + self.auto_attach_target = node.clone() + } + + pub fn next_auto_attach(&self) -> &Ptr { + self.auto_attach_target.borrow() + } + + pub fn add_loose(&mut self, inst: Instruction) -> Ptr { + let mut val = Ptr::from(Node::new(&Ptr::from(inst))); + self.add_loose_node(val.borrow_mut()); + val + } + + fn add_loose_node(&mut self, node: &Ptr) { + let instruction_address = node.id(); + if !self.nodes.contains_key(instruction_address.borrow()) { + + // If our node comes from another graph we inherit the edges. + if Ptr::is_not_null(&node.linked_graph) { + let existing_edges = with_mutable!(node.linked_graph.edges_of_mut(instruction_address)); + let new_edges = self.edges_of_mut(instruction_address); + existing_edges.outgoing.iter().for_each(|edge| { + new_edges.outgoing.push(edge.clone_inner()); + }); + existing_edges.incoming.iter().for_each(|edge| { + new_edges.incoming.push(edge.clone_inner()); + }); + } + + with_mutable!(node.linked_graph = Ptr::from(self.borrow_mut())); + self.nodes.insert(instruction_address, node.clone()); + } + } + + pub fn add(&mut self, inst: Instruction) -> Ptr { + let mut val = Ptr::from(Node::new(&Ptr::from(inst))); + self.add_node_with_edge(val.borrow_mut(), true); + val + } + + /// Simply adds the node to the graph.
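+ /// Equivalent to [add_node_with_edge] with `add_attached_edge` set to true, so the + /// node is also chained onto the current auto-attach target.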
+ pub fn add_node(&mut self, node: &mut Ptr) { + self.add_node_with_edge(node, true); + } + + pub fn contains_node(&self, node: &Ptr) -> bool { + self.nodes.contains_key(node.id().borrow()) + } + + /// Removes this node from the current graph, including all edges in it. + pub fn remove(&mut self, node: &Ptr) { + let node_id = node.id(); + + let personal_edges = self.edges.get(&node_id); + if personal_edges.is_some() { + let personal_edges = personal_edges.unwrap().clone(); + + // Get the other end of the various relationships and remove the edge to this node. + personal_edges.outgoing.iter().for_each(|val| { + // Get the other end of the edge... + let edges = self.edges.get_mut(val.end.borrow()).expect("Has to exist."); + + // ... only get the edges that point at us... + let targets = edges.incoming.iter() + .filter(|val| val.start == node_id) + .collect::>(); + + // ... then obliterate. + for edge in targets { + // Needed because remove takes an index, and we need to re-eval the index each time. + // -1 would probably work, but for now just re-calc as most arrays will be small. + let current_position = edges.incoming.iter().position(|ival| FlexiPtr::eq(edge, ival)).unwrap().clone(); + with_mutable!(edges.incoming.remove(current_position)); + }; + }); + + // Then do the same again but with the opposite direction. + personal_edges.incoming.iter().for_each(|val| { + let edges = self.edges.get_mut(val.start.borrow()).expect("Has to exist."); + + let targets = edges.outgoing.iter() + .filter(|val| val.end == node_id) + .collect::>(); + + for edge in targets { + let current_position = edges.outgoing.iter().position(|ival| FlexiPtr::eq(edge, ival)).unwrap().clone(); + with_mutable!(edges.outgoing.remove(current_position)); + }; + }); + } + + self.nodes.remove(node_id.borrow()); + } + + /// Removes a node and squashes itself back into the target attachment node. + /// This means that all edges get inherited by the attached node EXCEPT for the unconditional + /// incoming node. + pub fn squash_back(&mut self, target_attach: &mut Ptr, removed_node: &mut Ptr) { + if !self.contains_node(target_attach) { + self.add_loose_node(target_attach); + } + + self.reassign_edges(target_attach, removed_node); + self.remove(removed_node); + } + + /// Reassigns all edges that are attached to `throwaway` onto `destination`. This includes + /// changing all edges on the orbiting nodes. + pub fn reassign_edges(&mut self, destination: &mut Ptr, throwaway: &mut Ptr) { + let throwaway_id = throwaway.id(); + let ephemeral = throwaway.edges_mut(); + let dest_id = destination.id(); + let merge_target = destination.edges_mut(); + + for mut edge in ephemeral.outgoing.iter_mut().filter(|val| val.end != dest_id).map(|val| val.clone_inner()) { + // Reassign edges on the other end. + for edge in self.edges_of_mut(edge.end).incoming.iter_mut() { + if edge.start == throwaway_id { + edge.start = dest_id; + } + } + + // Then just add a new edge. + edge.start = dest_id; + merge_target.outgoing.push(edge); + } + + for mut edge in ephemeral.incoming.iter_mut().filter(|val| val.start != dest_id).map(|val| val.clone_inner()) { + // Reassign edges on the other end. 
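+ // (Illustration, ids assumed: when squashing node B into node A, a recorded + // C -> B on C's outgoing list is rewritten to C -> A before the edge is + // adopted into A's own incoming list.)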
+ for edge in self.edges_of_mut(edge.start).outgoing.iter_mut() { + if edge.end == throwaway_id { + edge.end = dest_id; + } + } + + edge.end = dest_id; + merge_target.incoming.push(edge); + } + } + + fn stringify(&self, f: &mut Formatter<'_>, graph_guard: &mut HashSet) -> std::fmt::Result { + f.write_str(format!("{}:\n", self.identity.as_str()).as_str()); + + let graph_walker = walk_logical_paths(&Ptr::from(self)); + let mut checked_nodes = HashSet::new(); + for next_node in graph_walker { + checked_nodes.insert(Ptr::as_address(&next_node)); + f.write_str(format!("{}\n", next_node.to_string()).as_str()); + } + + if checked_nodes.len() != self.nodes.len() { + f.write_str("\n"); + f.write_str("Orphans:\n"); + for node in self.nodes.values().filter(|val| !checked_nodes.contains(&Ptr::as_address(&val))) { + f.write_str(format!("{}\n", node.to_string()).as_str()); + } + } + + f.write_str("") + } +} + +impl Display for AnalysisGraph { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let mut guard = HashSet::new(); + self.stringify(f, &mut guard) + } +} + +/// Wrapper around a subgraph call within a graph since each execution point has different +/// variables going into it. This is an easy way to isolate them since the variable mappings will +/// stay static after evaluation. +pub struct CallableAnalysisGraph { + pub analysis_graph: Ptr, + + /// The declared input variables, in order, that this graph demands to be in place. + /// So if you have a declaration of method(arg1, arg2), and a call of it is method(1, %seven) + /// it allows you to link arg1 = 1, arg2 = %seven. + pub argument_mappings: HashMap>, +} + +impl Clone for CallableAnalysisGraph { + fn clone(&self) -> Self { + CallableAnalysisGraph::new_with_args(&self.analysis_graph, self.argument_mappings.clone()) + } +} + +impl CallableAnalysisGraph { + pub fn new(graph: &Ptr) -> CallableAnalysisGraph { + CallableAnalysisGraph { + analysis_graph: graph.clone(), + argument_mappings: HashMap::new() + } + } + + pub fn new_with_args(graph: &Ptr, argument_mappings: HashMap>) -> CallableAnalysisGraph { + CallableAnalysisGraph { + analysis_graph: graph.clone(), + argument_mappings + } + } +} + +impl Display for CallableAnalysisGraph { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + if !self.argument_mappings.is_empty() { + f.write_str("Arguments:\n"); + for (key, value) in self.argument_mappings.iter() { + f.write_str(format!("{} = {}\n", key, value.to_string()).as_str()); + } + f.write_str("\n"); + } + + self.analysis_graph.fmt(f) + } +} + +impl PartialEq for CallableAnalysisGraph { + fn eq(&self, other: &Self) -> bool { + if self.analysis_graph.identity != other.analysis_graph.identity { + return false; + } + + // Look values up by key; zipping two hash-map iterators is order-dependent and + // would make equal maps compare unequal. + if self.argument_mappings.len() != other.argument_mappings.len() { + return false; + } + + for (key, value) in self.argument_mappings.iter() { + match other.argument_mappings.get(key) { + Some(other_value) => { + if value != other_value { + return false; + } + } + None => { return false; } + } + } + + return true; + } +} + +impl Eq for CallableAnalysisGraph {} + +/// Analysis graph that has been fully analyzed and is ready to be executed. Carries graph and +/// appropriate metadata.
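+/// A hedged usage sketch: obtain one via [crate::execution::parse_file] and hand it to +/// [crate::execution::run_graph] together with its arguments, an engine and a tracer.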
+pub struct ExecutableAnalysisGraph { + pub callable_graph: Ptr, + pub context: Ptr +} + +impl ExecutableAnalysisGraph { + pub fn new(graph: &Ptr) -> ExecutableAnalysisGraph { + ExecutableAnalysisGraph { callable_graph: graph.clone(), context: Ptr::from(RuntimeContext::new()) } + } + + pub fn with_context(graph: &Ptr, context: &Ptr) -> ExecutableAnalysisGraph { + ExecutableAnalysisGraph { callable_graph: graph.clone(), context: context.clone() } + } + + pub fn analysis_graph(&self) -> &Ptr { + &self.callable_graph.analysis_graph + } +} + +impl Display for ExecutableAnalysisGraph { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + if !self.context.globals.is_empty() { + f.write_str("Globals:\n"); + for (key, value) in self.context.globals.iter() { + f.write_str(format!("{} = {}\n", key, value.to_string()).as_str()); + } + f.write_str("\n"); + } + + // Print out the arguments needed for our root graph seperately. + if !self.callable_graph.argument_mappings.is_empty() { + f.write_str("Arguments:\n"); + for (key, value) in self.callable_graph.argument_mappings.iter() { + f.write_str(format!("{}\n", key).as_str()); + } + f.write_str("\n"); + } + + f.write_str("[Root]\n"); + self.callable_graph.analysis_graph.fmt(f); + f.write_str("\n"); + + for graph in self.context.method_graphs.values() { + if graph.identity == self.callable_graph.analysis_graph.identity { continue } + + graph.fmt(f); + f.write_str("\n"); + } + + f.write_str("") + } +} + +/// Wrapper for an AnalysisGraph that adds helper methods for building and manipulating the graph. +pub struct AnalysisGraphBuilder { + pub graph: Ptr +} + +impl AnalysisGraphBuilder { + pub fn new(graph: &Ptr) -> AnalysisGraphBuilder { + AnalysisGraphBuilder { graph: graph.clone() } + } + + pub fn Initialize(&self) -> Ptr { + with_mutable_self!( + self.graph.add(Instruction::Initialize()) + ) + } + + pub fn Reset(&self, qbs: Value) -> Ptr { + with_mutable_self!( + self.graph.add(InstructionBuilder::Reset(qbs)) + ) + } + + pub fn ActivateQubit(&self, var: String, length: Option) -> Ptr { + with_mutable_self!( + self.graph.add(InstructionBuilder::ActivateQubit(var, length)) + ) + } + + pub fn DeactivateQubit(&self, qbs: Value) -> Ptr { + with_mutable_self!( + self.graph.add(InstructionBuilder::DeactivateQubit(qbs)) + ) + } + + pub fn Gate(&self, gate: Gate) -> Ptr { + with_mutable_self!( + self.graph.add(InstructionBuilder::Gate(gate)) + ) + } + + pub fn Return(&self, vars: Value) -> Ptr { + with_mutable_self!( + self.graph.add(InstructionBuilder::Return(vars)) + ) + } + + pub fn Assign(&self, name: String, value: Value) -> Ptr { + with_mutable_self!( + self.graph.add(InstructionBuilder::Assign(name, value)) + ) + } + + pub fn Label(&self, label: String) -> Ptr { + with_mutable_self!( + self.graph.add(InstructionBuilder::Label(label)) + ) + } + + pub fn Arithmatic(&self, var: String, left: Value, op: Operator, right: Value) -> Ptr { + with_mutable_self!( + self.graph.add(InstructionBuilder::Arithmatic(var, left, op, right)) + ) + } + + pub fn Condition(&self, var: String, left: Value, equality: Equalities, right: Value) -> Ptr { + with_mutable_self!( + self.graph.add(InstructionBuilder::Condition(var, Condition::new(left, equality, right))) + ) + } + + pub fn Throw(&self, message: Option) -> Ptr { + with_mutable_self!( + self.graph.add(InstructionBuilder::Throw(message)) + ) + } + + pub fn Log(&self, message: Value) -> Ptr { + with_mutable_self!( + self.graph.add(InstructionBuilder::Log(message)) + ) + } + + pub fn Subgraph(&self, graph: Value, 
variable: Option) -> Ptr { + with_mutable_self!( + self.graph.add(InstructionBuilder::Subgraph(graph, variable)) + ) + } + + pub fn I(&self, qx: Value) -> Ptr { + with_mutable_self!( + self.graph.add(InstructionBuilder::Gate(GateBuilder::I(qx))) + ) + } + + pub fn U(&self, qx: Value, theta: f64, phi: f64, lambda: f64) -> Ptr { + with_mutable_self!( + self.graph.add(InstructionBuilder::Gate( + GateBuilder::U( + qx, + Value::Float(theta), + Value::Float(phi), + Value::Float(lambda) + )))) + } + + pub fn R(&self, pauli: Value, qx: Value, radians: Value) -> Ptr { + with_mutable_self!( + self.graph.add( + InstructionBuilder::Gate( + GateBuilder::R( + pauli.clone(), + qx.clone(), + radians.clone() + )) + ) + ) + } + + pub fn CR(&self, pauli: Value, conditions: Value, target: Value, radians: Value) -> Ptr { + with_mutable_self!( + self.graph.add( + InstructionBuilder::Gate( + GateBuilder::CR(pauli, conditions, target, radians) + ) + ) + ) + } + + pub fn X(&self, qx: Value, radians: f64) -> Ptr { + with_mutable_self!( + self.graph.add( + InstructionBuilder::Gate( + GateBuilder::X(qx, Value::from(radians)) + )) + ) + } + + pub fn Y(&self, qx: Value, radians: f64) -> Ptr { + with_mutable_self!( + self.graph.add(InstructionBuilder::Gate( + GateBuilder::Y(qx, Value::from(radians)) + )) + ) + } + + pub fn Z(&self, qx: Value, radians: f64) -> Ptr { + with_mutable_self!( + self.graph.add(InstructionBuilder::Gate( + GateBuilder::Z(qx, Value::from(radians)) + )) + ) + } + + pub fn CX(&self, conditions: Value, target: Value, radians: f64) -> Ptr { + with_mutable_self!( + self.graph.add( + InstructionBuilder::Gate( + GateBuilder::CX( + conditions.clone(), + target.clone(), + Value::Float(radians) + ))) + ) + } + + pub fn CZ(&self, conditions: Value, target: Value, radians: f64) -> Ptr { + with_mutable_self!( + self.graph.add( + InstructionBuilder::Gate( + GateBuilder::CZ( + conditions.clone(), + target.clone(), + Value::Float(radians) + ))) + ) + } + + pub fn CY(&self, conditions: Value, target: Value, radians: f64) -> Ptr { + with_mutable_self!( + self.graph.add( + InstructionBuilder::Gate( + GateBuilder::CY( + conditions.clone(), + target.clone(), + Value::Float(radians) + ))) + ) + } + + pub fn Measure(&self, qx: Value, result: Value, var: Value) -> Ptr { + with_mutable_self!( + self.graph.add( + InstructionBuilder::Gate( + GateBuilder::Measure(qx, result, var) + )) + ) + } + + pub fn Expression(&self, expr: Expression, variable: Option) -> Ptr { + with_mutable_self!( + self.graph.add(InstructionBuilder::Expression(expr, variable)) + ) + } +} + +impl Deref for AnalysisGraphBuilder { + type Target = AnalysisGraph; + + fn deref(&self) -> &Self::Target { + self.graph.deref() + } +} + +impl DerefMut for AnalysisGraphBuilder { + fn deref_mut(&mut self) -> &mut Self::Target { + self.graph.deref_mut() + } +} + +pub struct Edge { + /// ID of the node that's on the end of this edge. + pub start: usize, + pub end: usize, + + /// An edge assignment means when this edge is traveled you want to assign these values to + /// these variables. 
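+ /// For instance (values assumed): an edge carrying `("x", Value::Int(1))` sets + /// `x = 1` whenever it is traveled, which is how phi-node results get lowered.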
+ pub assignments: Option>, + pub conditions: Option +} + +impl Clone for Edge { + fn clone(&self) -> Self { + Edge { + start: self.start.clone(), + end: self.end.clone(), + assignments: self.assignments.as_ref().map(|val| val.iter().map(|ival| ival.clone()).collect::>()), + conditions: self.conditions.as_ref().map(|val| val.clone()) + } + } +} + +impl Edge { + pub fn new(start: usize, end: usize) -> Edge { + Edge::new_with_metadata(start, end, None, None) + } + + pub fn new_with_metadata( + start: usize, end: usize, assignments: Option>, conditions: Option) -> Edge { + Edge { start, end, assignments, conditions } + } + + /// This will initialize the vector if it's None before returning it. + pub fn assignments(&mut self) -> &mut Vec<(String, Value)> { + if self.assignments.is_none() { + self.assignments = Some(Vec::new()); + } + + self.assignments.as_mut().unwrap() + } + + /// Returns a clone of the current conditions, if any. + pub fn conditions(&mut self) -> Option { + self.conditions.as_mut().map_or(None, |val| Some(val.clone())) + } + + pub fn is_unconditional(&self) -> bool { + self.conditions.is_none() + } + + pub(crate) fn stringify_condition(&self) -> String { + if let Some(val) = self.conditions.as_ref() { + format!(" if {}", val.to_string()).to_string() + } else { + "".to_string() + } + } + + pub(crate) fn stringify_assigns(&self) -> String { + if let Some(val) = self.assignments.as_ref() { + if val.is_empty() { + return "".to_string(); + } + format!(" with {}", val.iter().map(|val| { + format!("{} = {}", val.0, val.1.to_string()).to_string() + }).collect::>().join(", ")) + } else { + "".to_string() + } + } +} + +impl Display for Edge { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let condition = self.stringify_condition(); + let assigns = self.stringify_assigns(); + + f.write_str( + format!("{}->{}{}", self.start, self.end, + if !condition.is_empty() || !assigns.is_empty() { + format!(" ({}{}{})", condition, if !condition.is_empty() && !assigns.is_empty() { " and" } else { "" }, assigns) + } else { + "".to_string() + }).as_str() + ) + } +} + +pub struct Node { + linked_graph: Ptr, + pub instruction: Ptr, + + // Assigned just before execution; states precisely what position the node + // in the graph holds in relation to its brethren. + pub order: Option +} + +impl Node { + pub fn new(inst: &Ptr) -> Node { + Node { + linked_graph: Ptr::None, + instruction: inst.clone(), + order: None + } + } + + pub fn id(&self) -> usize { + Ptr::as_address(&self.instruction) + } + + pub fn edges_mut(&mut self) -> &mut Ptr { + let id = self.id().clone(); + self.linked_graph.edges_of_mut(id) + } + + pub fn edges(&self) -> &Ptr { + let id = self.id().clone(); + self.linked_graph.edges_of(id) + } + + pub fn out_edges(&self) -> &[Ptr] { + self.edges().outgoing.borrow() + } + + pub fn in_edges(&self) -> &[Ptr] { + self.edges().incoming.borrow() + } + + pub fn incoming_nodes(&self) -> Vec<(Ptr, Ptr)> { + self.edges().incoming.iter() + .map(|val| (val.clone(), self.linked_graph.find_node(val.start).expect("Node should exist.").clone())) + .collect() + } + + pub fn outgoing_nodes(&self) -> Vec<(Ptr, Ptr)> { + self.edges().outgoing.iter() + .map(|val| (val.clone(), self.linked_graph.find_node(val.end).expect("Node should exist.").clone())) + .collect() + } + + pub fn incoming_conditional_nodes(&self) -> Vec<(Ptr, Ptr)> { + self.edges().incoming.iter() + .filter(|val| val.conditions.is_some()) + .map(|val| (val.clone(), self.linked_graph.find_node(val.start).expect("Node
should exist.").clone())) + .collect() + } + + pub fn outgoing_conditional_nodes(&mut self) -> Vec<(Ptr, Ptr)> { + self.edges().outgoing.iter() + .filter(|val| val.conditions.is_some()) + .map(|val| (val.clone(), self.linked_graph.find_node(val.end).expect("Node should exist.").clone())) + .collect() + } + + /// The next unconditional node. + pub fn next_node(&mut self) -> Option<(Ptr, Ptr)> { + self.edges().outgoing.iter().filter(|val| val.conditions.is_none()).map(|val| + (val.clone(), self.linked_graph.find_node(val.end).expect("Node should exist.").clone()) + ).next() + } + + pub fn is_exit_node(&self) -> bool { + self.linked_graph.edges_of(self.id()).outgoing.is_empty() + } + + pub fn is_entry_node(&self) -> bool { + self.linked_graph.edges_of(self.id()).incoming.is_empty() + } + + pub(crate) fn stringify_edge_target(&self, edge: &Edge, target_node: &Node) -> String { + let condition = edge.stringify_condition(); + let assigns = edge.stringify_assigns(); + + format!("{}{}{}{}", target_node.order.map_or_else(|| target_node.id().to_string(), |val| val.to_string()), + condition, if !condition.is_empty() && !assigns.is_empty() { " and" } else { "" }, assigns) + } +} + +impl PartialEq for Node { + fn eq(&self, other: &Self) -> bool { + self.id() == other.id() + } +} + +impl Eq for Node { +} + +impl Clone for Node { + fn clone(&self) -> Self { + Node { + linked_graph: self.linked_graph.clone(), + instruction: self.instruction.clone(), + order: self.order.clone(), + } + } +} + +impl Display for Node { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + let node_id = self.order.map_or_else(|| self.id().to_string(), |val| val.to_string()); + let incoming = self.incoming_nodes().iter().map(|(edge, node)| { + self.stringify_edge_target(edge.deref(), node.deref()) + }).collect::>().join(" | "); + let out = self.outgoing_nodes().iter().map(|(edge, node)| { + self.stringify_edge_target(edge.deref(), node.deref()) + }).collect::>().join(" | "); + + let stringified_instruction = match self.instruction.deref() { + Instruction::Subgraph(sg, var) => { + let stringified_graph = match sg.deref() { + Value::Callable(sg) => sg.analysis_graph.identity.clone(), + val => val.to_string() + }; + + format!("{}calling {}", var.as_ref().map_or(String::from(""), |val| format!("{} = ", val.to_string())), stringified_graph) + } + inst => inst.to_string() + }; + + f.write_str(format!("({}) -> ({}) {} -> ({})", incoming, node_id, stringified_instruction, out).as_str()) + } +} \ No newline at end of file diff --git a/src/munchkin/pykin/src/hardware.rs b/src/munchkin/pykin/src/hardware.rs new file mode 100644 index 0000000..4ccbbc1 --- /dev/null +++ b/src/munchkin/pykin/src/hardware.rs @@ -0,0 +1,37 @@ +use std::fmt::{Display, Formatter}; +use std::hash::{Hash, Hasher}; + +#[derive(Debug, Clone, Copy)] +pub struct Qubit { + pub index: i64 +} + +impl Qubit { + pub fn new(index: i64) -> Qubit { + Qubit { index } + } + + pub fn debug(&self) -> String { + format!("qb[{}]", self.index) + } +} + +impl Display for Qubit { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str( format!("qb[{}]", self.index).as_str()) + } +} + +impl PartialEq for Qubit { + fn eq(&self, other: &Self) -> bool { + self.index == other.index + } +} + +impl Eq for Qubit {} + +impl Hash for Qubit { + fn hash(&self, state: &mut H) { + state.write_i64(self.index) + } +} diff --git a/src/munchkin/pykin/src/instructions.rs b/src/munchkin/pykin/src/instructions.rs new file mode 100644 index 0000000..c25130c --- /dev/null +++ 
b/src/munchkin/pykin/src/instructions.rs @@ -0,0 +1,1084 @@ +use std::borrow::Borrow; +use std::fmt::{Display, Formatter}; +use std::{ops}; +use std::cmp::Ordering; +use std::ops::{BitAnd, BitOr, BitXor, Deref}; +use crate::analysis::{AnalysisResult, QuantumProjection}; +use crate::with_mutable; +use crate::graphs::{CallableAnalysisGraph}; +use crate::hardware::Qubit; +use crate::smart_pointers::{Ptr}; + +#[derive(Copy, Clone)] +pub enum Equalities { + Equals, + NotEquals, + GreaterThan, + LessThan, + GreaterOrEqualThan, + LessOrEqualThan, +} + +impl Display for Equalities { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(match self { + Equalities::Equals => { "==" } + Equalities::NotEquals => { "!=" } + Equalities::GreaterThan => { ">" } + Equalities::LessThan => { "<" } + Equalities::GreaterOrEqualThan => { ">=" } + Equalities::LessOrEqualThan => { "<=" } + }) + } +} + +pub enum Operator { + Multiply, + Divide, + Add, + Subtract, + + // Binary operators + Or, + And, + Xor +} + +impl Display for Operator { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(match self { + Operator::Multiply => "*", + Operator::Divide => "/", + Operator::Add => "+", + Operator::Subtract => "-", + Operator::Or => "|", + Operator::And => "&", + Operator::Xor => "^", + }) + } +} + +pub struct Condition { + pub equality: Equalities, + pub left: Value, + pub right: Value +} + +impl Clone for Condition { + fn clone(&self) -> Self { + Condition::new(self.left.clone(), self.equality, self.right.clone()) + } +} + +impl Display for Condition { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(format!("{}{}{}", self.left, self.equality, self.right).as_str()) + } +} + +impl Condition { + pub fn new(left: Value, equality: Equalities, right: Value) -> Condition { + Condition {left, right, equality} + } +} + +// TODO: Make assignments be doable without adding an option to the instruction. +// Probably just another instruction - assign expression. Muse upon it. +// Less important now since assignments are simplified. +pub enum Instruction { + /// Instruction that does nothing. + NoOp, + + // Quantum + Initialize(), + Reset(Ptr), + + /// Activates a qubit for this scope. Optional value is size of qubit array + /// that should be allocated. + ActivateQubit(String, Option>), + + /// Deactivates this qubit, releasing it. + DeactivateQubit(Ptr), + + Gate(Ptr), + Return(Ptr), + + // Classical + Assign(String, Ptr), + Label(String), + + /// Assignment variable for the result. + Arithmatic(String, Ptr, Operator, Ptr), + Condition(String, Ptr), + + // Not directly mappable to programatic throwing, just means 'fail immediately'. + Throw(Option), + Log(Ptr), + + /// Reference to the graph to execute, with an optional place to put the result. + Subgraph(Ptr, Option), + + /// Dynamic expression that doesn't require a distinct operation right now. + /// Expression to execute with optional value to assign result into. + Expression(Expression, Option), +} + +pub struct InstructionBuilder { } + +impl InstructionBuilder { + /// See [Instruction::NoOp]. + pub fn NoOp() -> Instruction { + Instruction::NoOp + } + + /// See [Instruction::Initialize]. + pub fn Initialize() -> Instruction { + Instruction::Initialize() + } + + /// See [Instruction::Reset]. + pub fn Reset(val: Value) -> Instruction { + Instruction::Reset(Ptr::from(val)) + } + + /// See [Instruction::ActivateQubit]. 
+ pub fn ActivateQubit(variable: String, size: Option) -> Instruction { + Instruction::ActivateQubit(variable, size.map(|val| Ptr::from(val))) + } + + /// See [Instruction::DeactivateQubit]. + pub fn DeactivateQubit(value: Value) -> Instruction { + Instruction::DeactivateQubit(Ptr::from(value)) + } + + /// See [Instruction::Gate]. + pub fn Gate(gate: Gate) -> Instruction { + Instruction::Gate(Ptr::from(gate)) + } + + /// See [Instruction::Return]. + pub fn Return(value: Value) -> Instruction { + Instruction::Return(Ptr::from(value)) + } + + /// See [Instruction::Assign]. + pub fn Assign(variable: String, value: Value) -> Instruction { + Instruction::Assign(variable, Ptr::from(value)) + } + + /// See [Instruction::Label]. + pub fn Label(name: String) -> Instruction { + Instruction::Label(name) + } + + /// See [Instruction::Arithmatic]. + pub fn Arithmatic(variable: String, left: Value, op: Operator, right: Value) -> Instruction { + Instruction::Arithmatic(variable, Ptr::from(left), op, Ptr::from(right)) + } + + /// See [Instruction::Condition]. + pub fn Condition(variable: String, cond: Condition) -> Instruction { + Instruction::Condition(variable, Ptr::from(cond)) + } + + /// See [Instruction::Throw]. + pub fn Throw(message: Option) -> Instruction { + Instruction::Throw(message) + } + + /// See [Instruction::Log]. + pub fn Log(message: Value) -> Instruction { + Instruction::Log(Ptr::from(message)) + } + + /// See [Instruction::Subgraph]. + pub fn Subgraph(reference: Value, result_var: Option) -> Instruction { + Instruction::Subgraph(Ptr::from(reference), result_var) + } + + /// See [Instruction::Expression]. + pub fn Expression(expr: Expression, result_var: Option) -> Instruction { + Instruction::Expression(expr, result_var) + } +} + +impl Display for Instruction { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(match self { + Instruction::NoOp => { "noop".to_string() } + Instruction::Initialize() => { "init".to_string() } + Instruction::Reset(qbs) => { format!("reset {}", qbs.to_string()) } + Instruction::ActivateQubit(var, opt) => { format!("{} = activate qb{}", var, opt.as_ref().map_or("".to_string(), |val| format!("[{}]", val.to_string()))) } + Instruction::DeactivateQubit(qbs) => { format!("deactivate qb {}", qbs.to_string()) } + Instruction::Gate(gate) => { gate.to_string() } + Instruction::Return(val) => { format!("return {}", val.to_string()) } + Instruction::Assign(name, val) => { format!("{} = {}", name, val) } + Instruction::Label(name) => { format!("label {}", name) } + Instruction::Arithmatic(var, left, op, right) => { format!("{} = {}{}{}", var, left, op, right) } + Instruction::Condition(var, cond) => { format!("{} = {}", var, cond.to_string()) } + Instruction::Throw(ex) => { + if ex.is_some() { format!("throw '{}'", ex.as_ref().unwrap())} else { "throw".to_string() } + } + Instruction::Log(log) => { format!("log '{}'", log) } + Instruction::Subgraph(sg, var) => { + format!("{}{}", var.as_ref().map_or(String::from(""), |val| format!("{} = ", val.to_string())), sg.to_string()) + } + Instruction::Expression(expr, var) => { + if let Some(variable) = var { + format!("{} = {}", variable, expr.to_string()) + } else { + expr.to_string() + } + } + }.as_str()) + } +} + +pub enum LambdaModifier { + Ctl, + Adj +} + +pub enum Expression { + Clone(Value), + Length(Value), + NegateSign(Value), + Stringify(Value), + + /// Allows dynamically injecting arguments into a callable. 
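+ /// For example (usage assumed): binding a captured value into a callable's + /// argument mappings just before the subgraph gets invoked.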
+ ArgInjection(Value, Option), + + MakeCtrlAdj(Value, LambdaModifier) +} + +impl Display for Expression { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str( + match self { + Expression::Clone(value) => format!("clone {}", value.to_string()), + Expression::Length(value) => format!("length {}", value.to_string()), + Expression::NegateSign(value) => format!("sign negate {}", value.to_string()), + Expression::Stringify(value) => format!("stringify {}", value.to_string()), + Expression::ArgInjection(graph, val) => format!("inject {} into {}", val.as_ref().map_or("".to_string(), |val| val.to_string()), graph.to_string()), + Expression::MakeCtrlAdj(val, modifier) => + format!("Swapping {} to {}", val.to_string(), + match modifier { + LambdaModifier::Ctl => "ctrl", + LambdaModifier::Adj => "adj" + } + ) + }.as_str()) + } +} + +#[derive(Clone, Copy, Eq, PartialEq)] +pub enum Pauli { + I = 0, + X = -1, + Z = -2, + Y = -3 +} + +impl Pauli { + pub fn from_num(index: &i8) -> Pauli { + match index { + 0 => Pauli::I, + -1 => Pauli::X, + -2 => Pauli::Z, + -3 => Pauli::Y, + _ => panic!("Not a valid int for pauli: {}.", index.to_string()) + } + } +} + +impl Display for Pauli { + fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { + f.write_str(match self { + Pauli::I => "I", + Pauli::X => "X", + Pauli::Z => "Z", + Pauli::Y => "Y" + }) + } +} + +// TODO: Remove pointers from objects who own the value - like qubit, analysis result, etc. +// A value should _be_ the representation of the value, and are always themselves wrapped in +// pointers, so the inner value also being a pointer increases complexity and adds potential for +// errors. + +pub enum Value { + Empty, + Byte(i8), + Short(i16), + Int(i64), + Long(i128), + Bool(bool), + Float(f64), + String(String), + Pauli(Pauli), + Qubit(Qubit), + Array(Vec>), + + /// List of qubits this promise needs, the axis it wants to measure on and the projection + /// the result should be got from. + QuantumPromise(Vec, Ptr), + AnalysisResult(Ptr), + + /// First value is the in-line variable the value is referencing, the second is additional + /// information about what this is pointing to, such as an indexer if the value is an array, + /// or further field if it's pointing at another composite object. + Ref(String, Option>), + + /// Allows graphs to be propagated as arguments. These are special and won't work in every operation. + Callable(Ptr) +} + +impl Clone for Value { + fn clone(&self) -> Self { + // TODO: As above, strip pointers away from certain objects. + match self { + Value::Empty => Value::Empty, + Value::Byte(val) => Value::Byte(val.clone()), + Value::Short(val) => Value::Short(val.clone()), + Value::Int(val) => Value::Int(val.clone()), + Value::Long(val) => Value::Long(val.clone()), + Value::Bool(val) => Value::Bool(val.clone()), + Value::Float(val) => Value::Float(val.clone()), + Value::String(val) => Value::String(val.clone()), + Value::Pauli(val) => Value::Pauli(val.clone()), + Value::Qubit(qb) => Value::Qubit(qb.clone()), + Value::Array(array) => Value::Array(array.iter().map(|val| val.clone_inner()).collect()), + Value::QuantumPromise(qbs, proj) => + Value::QuantumPromise(qbs.clone(), proj.clone()), + Value::AnalysisResult(res) => Value::AnalysisResult(res.clone_inner()), + Value::Ref(ref_, optional) => Value::Ref(ref_.clone(), optional.as_ref().map(|val| val.clone_inner())), + Value::Callable(graph) => Value::Callable(graph.clone_inner()), + } + } +} + +// TODO: May want to return references in the as_x methods. 
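+ +// Coercion sketch (per the try_as_x methods below, illustrative): `Value::Bool(true).as_int() == 1`, +// while `Value::String(..).try_as_int()` yields None and `as_int` would panic.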
+ +impl Value { + pub fn try_as_int(&self) -> Option { + match self { + Value::Bool(b) => Some(if b.clone() == true { 1 } else { 0 }), + Value::Byte(b) => Some(b.clone() as i64), + Value::Short(s) => Some(s.clone() as i64), + Value::Int(i) => Some(i.clone()), + Value::Long(l) => Some(l.clone() as i64), + Value::Float(f) => Some(f.clone() as i64), + Value::QuantumPromise(qbs, projection) => Some(if with_mutable!(projection.results_for(qbs).is_one()) { 1 } else { 0 }), + _ => None + } + } + + pub fn as_int(&self) -> i64 { + self.try_as_int().expect(format!("Not a numeric: {}.", self.to_string()).as_str()) + } + + pub fn try_as_byte(&self) -> Option { + match self { + Value::Bool(b) => Some(if b.clone() == true { 1 } else { 0 }), + Value::Byte(b) => Some(b.clone()), + Value::Short(s) => Some(s.clone() as i8), + Value::Int(i) => Some(i.clone() as i8), + Value::Long(l) => Some(l.clone() as i8), + Value::Float(f) => Some(f.clone() as i8), + Value::QuantumPromise(qbs, projection) => Some(if with_mutable!(projection.results_for(qbs).is_one()) { 1 } else { 0 }), + _ => None + } + } + + pub fn as_byte(&self) -> i8 { + self.try_as_byte().expect(format!("Not a byte: {}.", self.to_string()).as_str()) + } + + pub fn try_as_short(&self) -> Option { + match self { + Value::Bool(b) => Some(if b.clone() == true { 1 } else { 0 }), + Value::Byte(b) => Some(b.clone() as i16), + Value::Short(s) => Some(s.clone()), + Value::Int(i) => Some(i.clone() as i16), + Value::Long(l) => Some(l.clone() as i16), + Value::Float(f) => Some(f.clone() as i16), + Value::QuantumPromise(qbs, projection) => Some(if with_mutable!(projection.results_for(qbs).is_one()) { 1 } else { 0 }), + _ => None + } + } + + pub fn as_short(&self) -> i16 { + self.try_as_short().expect(format!("Not a short: {}.", self.to_string()).as_str()) + } + + pub fn try_as_long(&self) -> Option { + match self { + Value::Bool(b) => Some(if b.clone() == true { 1 } else { 0 }), + Value::Byte(b) => Some(b.clone() as i128), + Value::Short(s) => Some(s.clone() as i128), + Value::Int(i) => Some(i.clone() as i128), + Value::Long(l) => Some(l.clone()), + Value::Float(f) => Some(f.clone() as i128), + Value::QuantumPromise(qbs, projection) => Some(if with_mutable!(projection.results_for(qbs).is_one()) { 1 } else { 0 }), + _ => None + } + } + + pub fn as_long(&self) -> i128 { + self.try_as_long().expect(format!("Not a long: {}.", self.to_string()).as_str()) + } + + pub fn try_as_float(&self) -> Option { + match self { + Value::Bool(b) => Some(if b.clone() == true { 1.0 } else { 0.0 }), + Value::Byte(b) => Some(b.clone() as f64), + Value::Short(s) => Some(s.clone() as f64), + Value::Int(i) => Some(i.clone() as f64), + Value::Long(l) => Some(l.clone() as f64), + Value::Float(f) => Some(f.clone()), + Value::QuantumPromise(qbs, projection) => Some(if with_mutable!(projection.results_for(qbs).is_one()) { 1.0 } else { 0.0 }), + _ => None + } + } + + pub fn as_float(&self) -> f64 { + self.try_as_float().expect(format!("Not a float: {}.", self.to_string()).as_str()) + } + + pub fn try_as_array(&self) -> Option<&Vec>> { + match self { + Value::Array(ar) => Some(ar.as_ref()), + _ => None + } + } + + pub fn as_array(&self) -> &Vec> { + self.try_as_array().expect(format!("Not an array: {}.", self.to_string()).as_str()) + } + + pub fn try_as_qubit(&self) -> Option<&Qubit> { + match self { + Value::Qubit(qb) => Some(qb), + _ => None + } + } + + pub fn as_qubit(&self) -> &Qubit { + self.try_as_qubit().expect(format!("Not a qubit: {}.", self.to_string()).as_str()) + } + + /// Attempts to
retrieve the inner value of a Value::String. + /// Important to note it does not do a to_string on the object, it only retrieves the + /// inner string from a Value designated as a string. + pub fn try_as_string(&self) -> Option { + match self { + Value::String(str_) => Some(str_.clone()), + _ => None + } + } + + /// Attempts to retrieve the inner value of a Value::String. + /// See [`Value::try_as_string`] for some additional details. + pub fn as_string(&self) -> String { + self.try_as_string().expect(format!("Not a string: {}.", self.to_string()).as_str()) + } + + pub fn try_as_bool(&self) -> Option { + if let Some(value) = self.try_as_byte() { + if value != 0 && value != 1 { + panic!("Bool int conversion not 0 or 1.") + } + + return Some(value == 1); + } + + match self { + Value::Bool(val) => Some(val.clone()), + _ => None + } + } + + pub fn as_bool(&self) -> bool { + self.try_as_bool().expect(format!("Not a bool: {}.", self.to_string()).as_str()) + } + + pub fn try_as_reference(&self) -> Option<(String, Option>)> { + match self { + Value::Ref(ref_, additional) => Some((ref_.clone(), additional.as_ref().map(|val| val.clone()))), + _ => None + } + } + + pub fn as_reference(&self) -> (String, Option>) { + self.try_as_reference().expect(format!("Not a reference: {}.", self.to_string()).as_str()) + } + + pub fn try_as_pauli(&self) -> Option { + // If we're a small int, automatically map. + if let Some(value) = self.try_as_byte() { + return Some(Pauli::from_num(&value)); + } + + match self { + Value::Pauli(pauli) => Some(pauli.clone()), + _ => None + } + } + + pub fn as_pauli(&self) -> Pauli { + self.try_as_pauli().expect(format!("Not a pauli: {}.", self.to_string()).as_str()) + } + + pub fn try_as_analysis_result(&self) -> Option> { + // TODO: Coerce more values into an analysis result if possible. + match self { + Value::AnalysisResult(res) => Some(res.clone()), + _ => None + } + } + + pub fn as_analysis_result(&self) -> Ptr { + self.try_as_analysis_result().expect(format!("Not an analysis result: {}.", self.to_string()).as_str()) + } + + pub fn try_as_callable(&self) -> Option> { + match self { + Value::Callable(res) => Some(res.clone()), + _ => None + } + } + + pub fn as_callable(&self) -> Ptr { + self.try_as_callable().expect(format!("Not a callable: {}.", self.to_string()).as_str()) + } +} + +// TODO: Improve projection results. It's a value distribution (and many other forms), come up +// with rules regarding certain numbers. 
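+
+// A minimal usage sketch of the coercion helpers above (illustrative only; the
+// `coercion_examples` module name is ours, not part of the original code).
+// `try_as_*` returns None on incompatible variants while `as_*` panics, and the
+// numeric variants coerce freely between widths:
+#[cfg(test)]
+mod coercion_examples {
+  use super::*;
+
+  #[test]
+  fn numeric_coercions() {
+    // Bools coerce to 0/1 across every numeric accessor.
+    assert_eq!(Value::Bool(true).as_int(), 1);
+    // Wider types truncate when narrowed, mirroring `as` casts.
+    assert_eq!(Value::Float(2.9).as_int(), 2);
+    // Non-numeric variants simply yield None.
+    assert!(Value::String("x".to_string()).try_as_int().is_none());
+  }
+}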
+
+impl PartialEq for Value {
+  fn eq(&self, other: &Self) -> bool {
+    match self {
+      Value::Empty => matches!(other, Value::Empty),
+      Value::Byte(b) => other.try_as_byte().map_or(false, |other_b| *b == other_b),
+      Value::Short(s) => other.try_as_short().map_or(false, |other_s| *s == other_s),
+      Value::Int(i) => other.try_as_int().map_or(false, |other_i| *i == other_i),
+      Value::Long(l) => other.try_as_long().map_or(false, |other_l| *l == other_l),
+      Value::Bool(b) => other.try_as_bool().map_or(false, |other_b| *b == other_b),
+      Value::Float(f) => other.try_as_float().map_or(false, |other_f| *f == other_f),
+      Value::String(s) => other.try_as_string().map_or(false, |other_s| *s == other_s),
+      Value::Pauli(p) => other.try_as_pauli().map_or(false, |other_p| *p == other_p),
+      Value::Qubit(qb) => other.try_as_qubit().map_or(false, |other_qb| qb == other_qb),
+      Value::Array(arr) => other.try_as_array().map_or(false, |other_arr| {
+        arr.len() == other_arr.len() &&
+          arr.iter().zip(other_arr.iter()).all(|(l, r)| l == r)
+      }),
+      Value::Ref(ref_, additional) => {
+        match other {
+          Value::Ref(other_ref, other_additional) => {
+            if additional.is_some() != other_additional.is_some() {
+              return false;
+            }
+
+            if ref_ != other_ref {
+              return false;
+            }
+
+            if let Some(our_additional) = additional {
+              let their_additional = other_additional.as_ref().unwrap();
+              if our_additional != their_additional {
+                return false;
+              }
+            }
+
+            true
+          },
+          _ => false
+        }
+      }
+      Value::QuantumPromise(qubits, projection) => {
+        // Forward the equality to the other type unless we're both promises.
+        match other {
+          Value::QuantumPromise(other_qubits, other_projection) => {
+            if other_qubits == qubits && Ptr::eq(projection, other_projection) {
+              return true;
+            }
+
+            // Even if it's the same projection, comparing against different qubits requires value analysis.
+            projection.is_equal_for(
+              other_projection.deref(),
+              Some(&other_qubits.iter().map(|val| val.index).collect()))
+          }
+          _ => other == self
+        }
+      }
+      Value::AnalysisResult(ar) => other.try_as_analysis_result().map_or(false, |other_ar| *ar == other_ar),
+      Value::Callable(call) => other.try_as_callable().map_or(false, |other_call| *call == other_call)
+    }
+  }
+}
+
+impl PartialOrd for Value {
+  fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+    match self {
+      Value::Byte(b) => b.partial_cmp(&other.as_byte()),
+      Value::Short(s) => s.partial_cmp(&other.as_short()),
+      Value::Int(i) => i.partial_cmp(&other.as_int()),
+      Value::Long(l) => l.partial_cmp(&other.as_long()),
+      Value::Float(f) => f.partial_cmp(&other.as_float()),
+      Value::Bool(b) => b.partial_cmp(&other.as_bool()),
+      Value::String(str_) => str_.partial_cmp(&other.as_string()),
+      _ => None
+    }
+  }
+}
+
+impl Eq for Value {
+}
+
+fn value_bitand(lhs: &Value, rhs: &Value) -> Value {
+  match lhs {
+    Value::Byte(b) => Value::from(b & rhs.as_byte()),
+    Value::Short(s) => Value::from(s & rhs.as_short()),
+    Value::Int(i) => Value::from(i & rhs.as_int()),
+    Value::Long(l) => Value::from(l & rhs.as_long()),
+    Value::Bool(b) => Value::from(b & rhs.as_bool()),
+    _ => panic!("Attempted & on {} and {} which is illegal.", lhs.to_string(), rhs.to_string())
+  }
+}
+
+impl BitAnd for Value {
+  type Output = Value;
+  fn bitand(self, rhs: Self) -> Self::Output { value_bitand(&self, &rhs) }
+}
+
+impl BitAnd for &Value {
+  type Output = Value;
+  fn bitand(self, rhs: Self) -> Self::Output { value_bitand(self, rhs) }
+}
+
+impl BitAnd for &mut Value {
+  type Output = Value;
+  fn bitand(self, rhs: Self) -> Self::Output { value_bitand(self, rhs) }
+}
+
+fn value_bitor(lhs: &Value, rhs: &Value) -> Value {
+  match lhs {
+    Value::Byte(b) => Value::from(b | rhs.as_byte()),
+    Value::Short(s) => Value::from(s | rhs.as_short()),
+    Value::Int(i) => Value::from(i | rhs.as_int()),
+    Value::Long(l) => Value::from(l | rhs.as_long()),
+    Value::Bool(b) => Value::from(b | rhs.as_bool()),
+    _ => panic!("Attempted | on {} and {} which is illegal.", lhs.to_string(), rhs.to_string())
+  }
+}
+
+impl BitOr for Value {
+  type Output = Self;
+  fn bitor(self, rhs: Self) -> Self::Output { value_bitor(&self, &rhs) }
+}
+
+impl BitOr for &Value {
+  type Output = Value;
+  fn bitor(self, rhs: Self) -> Self::Output { value_bitor(self, rhs) }
+}
+
+impl BitOr for &mut Value {
+  type Output = Value;
+  fn bitor(self, rhs: Self) -> Self::Output { value_bitor(self, rhs) }
+}
+
+fn value_bitxor(lhs: &Value, rhs: &Value) -> Value {
+  match lhs {
+    Value::Byte(b) => Value::from(b ^ rhs.as_byte()),
+    Value::Short(s) => Value::from(s ^ rhs.as_short()),
+    Value::Int(i) => Value::from(i ^ rhs.as_int()),
+    Value::Long(l) => Value::from(l ^ rhs.as_long()),
+    Value::Bool(b) => Value::from(b ^ rhs.as_bool()),
+    _ => panic!("Attempted ^ on {} and {} which is illegal.", lhs.to_string(), rhs.to_string())
+  }
+}
+
+impl BitXor for Value {
+  type Output = Value;
+  fn bitxor(self, rhs: Self) -> Self::Output { value_bitxor(&self, &rhs) }
+}
+
+impl BitXor for &Value {
+  type Output = Value;
+  fn bitxor(self, rhs: Self) -> Self::Output { value_bitxor(self, rhs) }
+}
+
+impl BitXor for &mut Value {
+  type Output = Value;
+  fn bitxor(self, rhs: Self) -> Self::Output { value_bitxor(self, rhs) }
+}
+
+fn value_subtract(lhs: &Value, rhs: &Value) -> Value {
+  match lhs {
+    Value::Byte(b) => Value::Byte(b - rhs.as_byte()),
+    Value::Short(s) => Value::Short(s - rhs.as_short()),
+    Value::Int(i) => Value::Int(i - rhs.as_int()),
+    Value::Long(l) => Value::Long(l - rhs.as_long()),
+    Value::Float(f) => Value::Float(f - rhs.as_float()),
+    _ => panic!("Can't subtract these two values: {} - {}.", lhs.to_string(), rhs.to_string())
+  }
+}
+
+impl ops::Sub for Value {
+  type Output = Value;
+  fn sub(self, rhs: Self) -> Self::Output {
+    value_subtract(self.borrow(), rhs.borrow())
+  }
+}
+
+impl ops::Sub for &Value {
+  type Output = Value;
+  fn sub(self, rhs: Self) -> Self::Output {
+    value_subtract(self.borrow(), rhs.borrow())
+  }
+}
+
+impl ops::Sub for &mut Value {
+  type Output = Value;
+  fn sub(self, rhs: Self) -> Self::Output {
+    value_subtract(self.borrow(), rhs.borrow())
+  }
+}
+
+fn value_add(lhs: &Value, rhs: &Value) -> Value {
+  fn larger_type(val: &Value) -> Option<i32> {
+    match val {
+      Value::Bool(_) => Some(1),
+      Value::Byte(_) => Some(2),
+      Value::Short(_) => Some(3),
+      Value::Int(_) => Some(4),
+      Value::Float(_) => Some(5),
+      Value::Long(_) => Some(6),
+      _ => None
+    }
+  }
+
+  // Switch operands so the larger numeric type is always on the left. Means if we have
+  // Long + Short or Short + Long the resultant type is always the larger one.
+  let (lhs, rhs) =
+    if let (Some(left_val), Some(right_val)) = (larger_type(lhs), larger_type(rhs)) {
+      if right_val > left_val { (rhs, lhs) } else { (lhs, rhs) }
+    } else {
+      (lhs, rhs)
+    };
+
+  // Special-case strings, since if either is a string we want to stringify them together.
+  if matches!(rhs, Value::String(_)) || matches!(lhs, Value::String(_)) {
+    let mut root = String::new();
+    let left_val = lhs.try_as_string().map_or_else(|| lhs.to_string(), |val| val);
+    let right_val = rhs.try_as_string().map_or_else(|| rhs.to_string(), |val| val);
+    root.push_str(left_val.as_str());
+    root.push_str(right_val.as_str());
+    return Value::String(root);
+  }
+
+  match lhs {
+    Value::Byte(b) => Value::Byte(b + rhs.as_byte()),
+    Value::Short(s) => Value::Short(s + rhs.as_short()),
+    Value::Int(i) => Value::Int(i + rhs.as_int()),
+    Value::Long(l) => Value::Long(l + rhs.as_long()),
+    Value::Float(f) => Value::Float(f + rhs.as_float()),
+    Value::Array(array) => {
+      let potential_array = rhs.try_as_array();
+      if let Some(other) = potential_array {
+        let mut result = Vec::new();
+        for val in array.iter() {
+          result.push(val.clone());
+        }
+
+        for val in other.iter() {
+          result.push(val.clone());
+        }
+
+        return Value::Array(result);
+      }
+
+      panic!("Can't add these two values: {} + {}.", lhs.to_string(), rhs.to_string())
+    }
+    _ => panic!("Can't add these two values: {} + {}.", lhs.to_string(), rhs.to_string())
+  }
+}
+
+impl ops::Add for Value {
+  type Output = Value;
+  fn add(self, rhs: Self) -> Self::Output {
+    value_add(self.borrow(), rhs.borrow())
+  }
+}
+
+impl ops::Add for &Value {
+  type Output = Value;
+  fn add(self, rhs: Self) -> Self::Output {
+    value_add(self.borrow(), rhs.borrow())
+  }
+}
+
+impl ops::Add for &mut Value {
+  type Output = Value;
+  fn add(self, rhs: Self) -> Self::Output {
+    value_add(self.borrow(), rhs.borrow())
+  }
+}
+
+fn value_divide(lhs: &Value, rhs: &Value) -> Value {
+  match lhs {
+    Value::Byte(b) => Value::Byte(b / rhs.as_byte()),
+    Value::Short(s) => Value::Short(s / rhs.as_short()),
+    Value::Int(i) => Value::Int(i / rhs.as_int()),
+    Value::Long(l) => Value::Long(l / rhs.as_long()),
+    Value::Float(f) => Value::Float(f / rhs.as_float()),
+    _ => panic!("Can't divide these two values: {} / {}.", lhs.to_string(), rhs.to_string())
+  }
+}
+
+impl ops::Div for Value {
+  type Output = Value;
+  fn div(self, rhs: Self) -> Self::Output {
+    value_divide(self.borrow(), rhs.borrow())
+  }
+}
+
+impl ops::Div for &Value {
+  type Output = Value;
+  fn div(self, rhs: Self) -> Self::Output {
+    value_divide(self.borrow(), rhs.borrow())
+  }
+}
+
+impl ops::Div for &mut Value {
+  type Output = Value;
+  fn div(self, rhs: Self) -> Self::Output {
+    value_divide(self.borrow(), rhs.borrow())
+  }
+}
+
+fn value_multiply(lhs: &Value, rhs: &Value) -> Value {
+  match lhs {
+    Value::Byte(b) => Value::Byte(b * rhs.as_byte()),
+    Value::Short(s) => Value::Short(s * rhs.as_short()),
+    Value::Int(i) => Value::Int(i * rhs.as_int()),
+    Value::Long(l) => Value::Long(l * rhs.as_long()),
+    Value::Float(f) => Value::Float(f * rhs.as_float()),
+    _ => panic!("Can't multiply these two values: {} * {}.", lhs.to_string(), rhs.to_string())
+  }
+}
+
+impl ops::Mul for Value {
+  type Output = Value;
+  fn mul(self, rhs: Self) -> Self::Output {
+    value_multiply(self.borrow(), rhs.borrow())
+  }
+}
+
+impl ops::Mul for &Value {
+  type Output = Value;
+  fn mul(self, rhs: Self) -> Self::Output {
+    value_multiply(self.borrow(), rhs.borrow())
+  }
+}
+
+impl ops::Mul for &mut Value {
+  type Output = Value;
+  fn mul(self, rhs: Self) -> Self::Output {
+    value_multiply(self.borrow(), rhs.borrow())
+  }
+}
+
+impl Display for Value {
+  fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+    f.write_str(match self {
+      Value::Empty => "empty".to_string(),
+      Value::Byte(b) => b.to_string(),
+      Value::Short(s) => s.to_string(),
+      Value::Int(i) => i.to_string(),
+      Value::Long(l) => l.to_string(),
+      Value::Bool(b) => b.to_string(),
+      Value::Float(f) => f.to_string(),
+      Value::String(s) => format!("\"{}\"", s),
+      Value::Qubit(qb) => qb.to_string(),
+      Value::Array(vec) => {
+        let mut stringified = vec.iter().take(5).map(|val| val.to_string()).collect::<Vec<String>>();
+        if vec.len() > 5 {
+          stringified.push(format!("... ({} more)", vec.len() - 5));
+        }
+        format!("[{}]", stringified.join(", "))
+      },
+      Value::Ref(ref_, further) => {
+        further.as_ref().map_or_else(|| ref_.clone(), |val| format!("{}[{}]", ref_, val.to_string()))
+      },
+      Value::QuantumPromise(qbs, proj) => format!(
+        "deferred execution of {} for {}",
+        proj.to_string(),
+        qbs.iter().map(|val| val.to_string()).collect::<Vec<String>>().join(",")),
+      Value::AnalysisResult(ar) => ar.to_string(),
+      Value::Pauli(p) => p.to_string(),
+      Value::Callable(call) => format!(
+        "Callable for {} with {}",
+        call.analysis_graph.identity,
+        call.argument_mappings.iter().map(|(key, val)| format!("{} = {}", key, val.to_string())).collect::<Vec<String>>().join(", "))
+    }.as_str())
+  }
+}
+
+/// Helper macro to build the Value to/from methods.
+macro_rules! value_into {
+  ($target:ty, $err_message:literal, $val_type:tt) => {
+    impl From<Value> for $target {
+      fn from(value: Value) -> Self {
+        match value { Value::$val_type(val) => val, _ => panic!("This Value isn't a {}", $err_message) }
+      }
+    }
+
+    impl From<&Value> for $target {
+      fn from(value: &Value) -> Self {
+        match value { Value::$val_type(val) => *val, _ => panic!("This Value isn't a {}", $err_message) }
+      }
+    }
+
+    impl From<$target> for Value {
+      fn from(value: $target) -> Self {
+        Value::$val_type(value)
+      }
+    }
+  }
+}
+
+value_into!(f64, "float", Float);
+value_into!(i8, "byte", Byte);
+value_into!(i16, "short", Short);
+value_into!(i64, "int", Int);
+value_into!(i128, "long", Long);
+value_into!(bool, "bool", Bool);
+
+/// All generalized gates. We don't add the adjoints here because those are just applied
+/// to the rotational values themselves.
+///
+/// TODO: Currently we have both distinct rotations around the axis as well as an R. We could
+///       squash everything into R's with a pauli, but is there a good reason for keeping them split?
+pub enum Gate {
+  /// Qubit.
+  I(Ptr<Value>),
+
+  /// Qubit, theta, phi, lambda.
+  U(Ptr<Value>, Ptr<Value>, Ptr<Value>, Ptr<Value>),
+
+  /// Pauli, qubit, theta.
+  R(Ptr<Value>, Ptr<Value>, Ptr<Value>),
+
+  /// Qubit, theta.
+  X(Ptr<Value>, Ptr<Value>),
+  Y(Ptr<Value>, Ptr<Value>),
+  Z(Ptr<Value>, Ptr<Value>),
+
+  /// Pauli, controllers, target, theta.
+  CR(Ptr<Value>, Ptr<Value>, Ptr<Value>, Ptr<Value>),
+
+  /// Controllers, target, theta.
+  CX(Ptr<Value>, Ptr<Value>, Ptr<Value>),
+  CZ(Ptr<Value>, Ptr<Value>, Ptr<Value>),
+  CY(Ptr<Value>, Ptr<Value>, Ptr<Value>),
+
+  /// Pauli, qubits, result variable.
+  Measure(Ptr<Value>, Ptr<Value>, Ptr<Value>)
+}
+
+pub struct GateBuilder {}
+
+impl GateBuilder {
+  /// See [Gate::I].
+  pub fn I(qubit: Value) -> Gate {
+    Gate::I(Ptr::from(qubit))
+  }
+
+  /// See [Gate::U].
+  pub fn U(qubit: Value, theta: Value, phi: Value, lambda: Value) -> Gate {
+    Gate::U(
+      Ptr::from(qubit),
+      Ptr::from(theta),
+      Ptr::from(phi),
+      Ptr::from(lambda))
+  }
+
+  /// See [Gate::R].
+  pub fn R(pauli: Value, qubit: Value, theta: Value) -> Gate {
+    Gate::R(
+      Ptr::from(pauli),
+      Ptr::from(qubit),
+      Ptr::from(theta))
+  }
+
+  pub fn X(qubit: Value, theta: Value) -> Gate {
+    GateBuilder::R(Value::Pauli(Pauli::X), qubit, theta)
+  }
+
+  pub fn Y(qubit: Value, theta: Value) -> Gate {
+    GateBuilder::R(Value::Pauli(Pauli::Y), qubit, theta)
+  }
+
+  pub fn Z(qubit: Value, theta: Value) -> Gate {
+    GateBuilder::R(Value::Pauli(Pauli::Z), qubit, theta)
+  }
+
+  /// See [Gate::CR].
+  pub fn CR(pauli: Value, controllers: Value, target: Value, theta: Value) -> Gate {
+    Gate::CR(Ptr::from(pauli), Ptr::from(controllers), Ptr::from(target), Ptr::from(theta))
+  }
+
+  pub fn CX(controllers: Value, target: Value, theta: Value) -> Gate {
+    GateBuilder::CR(Value::Pauli(Pauli::X), controllers, target, theta)
+  }
+
+  pub fn CZ(controllers: Value, target: Value, theta: Value) -> Gate {
+    GateBuilder::CR(Value::Pauli(Pauli::Z), controllers, target, theta)
+  }
+
+  pub fn CY(controllers: Value, target: Value, theta: Value) -> Gate {
+    GateBuilder::CR(Value::Pauli(Pauli::Y), controllers, target, theta)
+  }
+
+  /// See [Gate::Measure].
+  pub fn Measure(pauli: Value, qubits: Value, results: Value) -> Gate {
+    Gate::Measure(Ptr::from(pauli), Ptr::from(qubits), Ptr::from(results))
+  }
+}
+
+impl Display for Gate {
+  fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+    f.write_str(match self {
+      Gate::I(qb) => { format!("I {}", qb.to_string()) }
+      Gate::U(qb, theta, phi, lambda) => {
+        format!("U[{}] theta: {}, phi: {}, lambda: {}", qb, theta, phi, lambda)
+      }
+      Gate::X(qb, radian) => { format!("X[{}] {}", qb, radian) }
+      Gate::Y(qb, radian) => { format!("Y[{}] {}", qb, radian) }
+      Gate::Z(qb, radian) => { format!("Z[{}] {}", qb, radian) }
+      Gate::CX(cont, target, radian) => {
+        format!("CX[{}->{}] {}", cont, target, radian)
+      }
+      Gate::CZ(cont, target, radian) => {
+        format!("CZ[{}->{}] {}", cont, target, radian)
+      }
+      Gate::CY(cont, target, radian) => {
+        format!("CY[{}->{}] {}", cont, target, radian)
+      }
+      Gate::Measure(paulis, qbs, target) => {
+        format!("{} = measure {} across {}", target, qbs, paulis)
+      }
+      Gate::R(pauli, qubit, val) => format!("R{}[{}] {}", pauli.to_string(), qubit.to_string(), val.to_string()),
+      Gate::CR(pauli, cont, target, radian) => {
+        format!("C{}[{}->{}] {}", pauli, cont, target, radian)
+      }
+    }.as_str())
+  }
+}
diff --git a/src/munchkin/pykin/src/lib.rs b/src/munchkin/pykin/src/lib.rs
new file mode 100644
index 0000000..11a2c0b
--- /dev/null
+++ b/src/munchkin/pykin/src/lib.rs
@@ -0,0 +1,85 @@
+#![warn(clippy::all, clippy::pedantic)]
+#![allow(non_upper_case_globals, non_snake_case, unused_macros, dead_code, unused_variables, unused_must_use)]
+#![allow(clippy::needless_pass_by_value)]
+#![feature(get_mut_unchecked)]
+#![feature(unwrap_infallible)]
+#![feature(strict_provenance)]
+
+extern crate core;
+
+use std::env::current_exe;
+use std::fs::File;
+use log::{Level, LevelFilter, log, log_enabled};
+
+mod python;
+mod execution;
+mod smart_pointers;
+mod runtime;
+mod builders;
+mod hardware;
+mod instructions;
+mod evaluator;
+mod graphs;
+mod analysis;
+
+const DEFAULT_LOG_FILE: &str = "mk_logs.txt";
+
+/// Native initialization of the loggers. Defaults to the executable's location when deployed;
+/// if it detects it's running from a development build it walks the log file back up the
+/// folder tree.
+#[ctor::ctor]
+fn native_logger_initialize() {
+  let path = if let Ok(val) = current_exe() {
+    // If we're embedded we need to be given a different file path to log to.
+    if val.ends_with("python.exe") {
+      return;
+    }
+
+    let current_folder = val.parent().unwrap();
+
+    // Walk back to the root munchkin folder if we're in a build, otherwise stay at that folder level.
+    if current_folder.ends_with("deps") {
+      Some(current_folder.parent().unwrap().parent().unwrap().parent().unwrap().join(DEFAULT_LOG_FILE).to_str().unwrap().to_string())
+    } else {
+      Some(current_folder.join(DEFAULT_LOG_FILE).to_str().unwrap().to_string())
+    }
+  } else {
+    None
+  };
+
+  initialize_loggers(path);
+  log!(Level::Info, "Initialized on library startup.");
+}
+
+fn initialize_loggers(log_path: Option<String>) {
+  // If we've already been enabled, just do nothing.
+  if log_enabled!(Level::Error) {
+    return;
+  }
+
+  let mut appended_messages = Vec::new();
+  if let Some(logging_path) = log_path {
+    let file = File::create(logging_path.clone());
+    if let Ok(file) = file {
+      // TODO: Just print to both commandline and file.
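+      // One possible shape for that TODO (untested sketch, not wired in): tee the
+      // writes through a single `Write` wrapper over both sinks, e.g.
+      //   struct Tee(File);
+      //   impl std::io::Write for Tee {
+      //     fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
+      //       std::io::stdout().write_all(buf)?;
+      //       self.0.write(buf)
+      //     }
+      //     fn flush(&mut self) -> std::io::Result<()> { self.0.flush() }
+      //   }
+      // and hand `Target::Pipe(Box::new(Tee(file)))` to the builder below. The `Tee`
+      // name is illustrative only.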
+      let target = Box::new(file);
+      env_logger::builder()
+        .target(env_logger::Target::Pipe(target))
+        .filter_level(LevelFilter::Debug)
+        .format_suffix("\n")
+        .init();
+
+      log!(Level::Info, "File logging initialized.");
+      return;
+    }
+
+    appended_messages.push(format!("Attempted to open file at {} to log, failed with: {}", logging_path, file.err().unwrap().to_string()));
+  }
+
+  // If we've fallen through, previous forms of logger init have failed.
+  env_logger::builder().filter_level(LevelFilter::Debug).init();
+
+  log!(Level::Info, "Commandline logging initialized.");
+  for val in appended_messages {
+    log!(Level::Info, "{}", val);
+  }
+}
\ No newline at end of file
diff --git a/src/munchkin/pykin/src/python.rs b/src/munchkin/pykin/src/python.rs
new file mode 100644
index 0000000..5524570
--- /dev/null
+++ b/src/munchkin/pykin/src/python.rs
@@ -0,0 +1,301 @@
+use std::borrow::Borrow;
+use bitflags::Flags;
+use log::{Level, log, log_enabled};
+use pyo3::prelude::*;
+use pyo3::exceptions::PyValueError;
+use pyo3::types::{PyBool, PyFloat, PyInt, PyList, PyString};
+use crate::builders::PythonEngine;
+use crate::execution::{parse_file, run_file, run_graph};
+use crate::graphs::ExecutableAnalysisGraph;
+use crate::{DEFAULT_LOG_FILE, initialize_loggers};
+use crate::instructions::Value;
+use crate::runtime::ActiveTracers;
+use crate::smart_pointers::Ptr;
+
+#[pymodule]
+fn _native(_py: Python, m: &PyModule) -> PyResult<()> {
+  m.add_class::<Executor>()?;
+  m.add_function(wrap_pyfunction!(initialize_file_logger, m)?);
+  m.add_function(wrap_pyfunction!(initialize_commandline_logger, m)?);
+  m.add("DEFAULT_LOG_FILE", DEFAULT_LOG_FILE);
+  Ok(())
+}
+
+/// Proxy for initializing Munchkin loggers. Pass in a path for file logger initialization.
+#[pyfunction]
+fn initialize_file_logger(file_path: &str) {
+  initialize_loggers(Some(file_path.to_string()));
+}
+
+#[pyfunction]
+fn initialize_commandline_logger() {
+  initialize_loggers(None);
+}
+
+impl ToPyObject for Value {
+  fn to_object(&self, py: Python<'_>) -> PyObject {
+    match self {
+      Value::Empty => py.None(),
+      Value::Byte(nested) => nested.to_object(py),
+      Value::Short(nested) => nested.to_object(py),
+      Value::Int(nested) => nested.to_object(py),
+      Value::Long(nested) => nested.to_object(py),
+      Value::Bool(nested) => nested.to_object(py),
+      Value::Float(nested) => nested.to_object(py),
+      Value::String(nested) => nested.to_object(py),
+      Value::AnalysisResult(nested) => nested.distribution.to_object(py),
+      Value::Array(nested) => nested.iter().map(|val| val.to_object(py)).collect::<Vec<PyObject>>().to_object(py),
+      _ => panic!("Can't return this type.")
+    }
+  }
+}
+
+impl FromPyObject<'_> for Value {
+  fn extract(ob: &PyAny) -> PyResult<Self> {
+    let transformed = if ob.is_instance_of::<PyInt>().is_ok_and(|val| val) {
+      let value: i128 = ob.extract().expect(format!("Can't map {} to Munchkin value.", ob.to_string()).as_str());
+      Value::Long(value)
+    } else if ob.is_instance_of::<PyFloat>().is_ok_and(|val| val) {
+      let value: f64 = ob.extract().expect(format!("Can't map {} to Munchkin value.", ob.to_string()).as_str());
+      Value::Float(value)
+    } else if ob.is_instance_of::<PyBool>().is_ok_and(|val| val) {
+      let value: bool = ob.extract().expect(format!("Can't map {} to Munchkin value.", ob.to_string()).as_str());
+      Value::Bool(value)
+    } else if ob.is_instance_of::<PyString>().is_ok_and(|val| val) {
+      let value: String = ob.extract().expect(format!("Can't map {} to Munchkin value.", ob.to_string()).as_str());
+      Value::String(value)
+    } else {
+      return Err(PyValueError::new_err("Can't resolve Python value to Munchkin value."));
+    };
+
+    Ok(transformed)
+  }
+}
+
+/// Python wrapper around an execution graph. Currently used simply for passing things around
+/// the APIs. Later it'll expose more internal operations on the graph itself for
+/// mutations/changes from Python.
+#[pyclass]
+#[derive(Clone)]
+pub(crate) struct Graph {
+  pub wrapped: Ptr<ExecutableAnalysisGraph>
+}
+
+impl Graph {
+  pub fn new(graph: &Ptr<ExecutableAnalysisGraph>) -> Graph {
+    activate_fallback_logger();
+    Graph { wrapped: graph.clone() }
+  }
+}
+
+/// People should set up loggers before they call our Python bindings, but if they don't we want
+/// to make sure our execution chain still outputs things correctly.
+///
+/// This call should be the first line in any Rust/Python boundary. Mostly constructors and
+/// free methods.
+fn activate_fallback_logger() {
+  if !log_enabled!(Level::Error) {
+    initialize_commandline_logger();
+    log!(Level::Info, "Logger not initialized, defaulting to commandline.");
+  }
+}
+
+#[pyclass]
+pub(crate) struct Executor {
+  tracing: ActiveTracers,
+}
+
+#[pymethods]
+impl Executor {
+  #[new]
+  fn new() -> Self {
+    activate_fallback_logger();
+    Executor { tracing: ActiveTracers::empty() }
+  }
+
+  fn trace_runtime(&mut self) {
+    self.tracing.insert(ActiveTracers::Runtime);
+  }
+
+  fn trace_projections(&mut self) {
+    self.tracing.insert(ActiveTracers::Projections);
+  }
+
+  fn trace_graphs(&mut self) {
+    self.tracing.insert(ActiveTracers::Graphs);
+  }
+
+  #[allow(clippy::unused_self)]
+  fn parse_file(
+    &self, file: &str, entry_point: Option<&str>) -> PyResult<Py<Graph>> {
+    Python::with_gil(|py| -> PyResult<Py<Graph>> {
+      parse_file(file, entry_point)
+        .map_err(PyValueError::new_err)
+        .map(|value| {
+          let result: Py<Graph> = Py::new(py, Graph::new(value.borrow()))
+            .expect("Unable to build Python graph representation.");
+          result
+        })
+    })
+  }
+
+  fn run_graph(&self, graph: Py<Graph>, arguments: &PyAny,
+               builder_adaptor: &PyAny, runtime_adaptor: &PyAny) -> PyResult<PyObject> {
+    Python::with_gil(|py| -> PyResult<PyObject> {
+      // We just build a reference directly here so our smart-pointer doesn't auto-drop.
+      let py_engine = Ptr::from(
+        PythonEngine::new(builder_adaptor, runtime_adaptor));
+
+      let graph: Graph = graph.extract(py).expect("Unable to extract graph.");
+
+      let args: Vec<Value> = arguments.extract().expect("Unable to transform arguments.");
+
+      run_graph(graph.wrapped.borrow(), args.as_ref(), py_engine.borrow(), self.tracing.clone())
+        .map_err(PyValueError::new_err)
+        .map(|value| {
+          value.map_or(py.None(), |val| val.to_object(py))
+        })
+    })
+  }
+
+  #[allow(clippy::unused_self)]
+  fn run(
+    &self,
+    file: &str,
+    builder_adaptor: &PyAny,
+    runtime_adaptor: &PyAny,
+  ) -> PyResult<PyObject> {
+    Python::with_gil(|py| -> PyResult<PyObject> {
+      self.run_with_args(file, PyList::empty(py), builder_adaptor, runtime_adaptor)
+    })
+  }
+
+  #[allow(clippy::unused_self)]
+  fn run_with_args(
+    &self,
+    file: &str,
+    arguments: &PyAny,
+    builder_adaptor: &PyAny,
+    runtime_adaptor: &PyAny,
+  ) -> PyResult<PyObject> {
+    Python::with_gil(|py| -> PyResult<PyObject> {
+      // We just build a reference directly here so our smart-pointer doesn't auto-drop.
+      let py_engine = Ptr::from(
+        PythonEngine::new(builder_adaptor, runtime_adaptor));
+
+      let args: Vec<Value> = arguments.extract()?;
+
+      run_file(file, &args, py_engine.borrow(), None, self.tracing.clone())
+        .map_err(PyValueError::new_err)
+        .map(|value| {
+          value.map_or(py.None(), |val| val.to_object(py))
+        })
+    })
+  }
+}
+
+#[cfg(test)]
+mod tests {
+  use std::collections::HashMap;
+  use std::fs::canonicalize;
+  use pyo3::{PyAny, PyObject, PyResult, Python};
+  use pyo3::types::{PyList, PyModule};
+  use crate::python::Executor;
+
+  fn python_from<'a>(py: Python<'a>, file: &str, name: &str) -> &'a PyAny {
+    PyModule::from_code(py, file, "", "").unwrap()
+      .getattr(name).unwrap().call0().expect("Unable to call Python method/constructor.")
+  }
+
+  fn assert_default_results(py: Python, results: PyResult<PyObject>) {
+    let rust_results: HashMap<String, i64> = results.expect("Results need to exist.")
+      .extract(py).expect("Results aren't the right type.");
+
+    assert_eq!(rust_results.get("00").expect("Key should exist"), &250);
+    assert_eq!(rust_results.get("01").expect("Key should exist"), &250);
+    assert_eq!(rust_results.get("10").expect("Key should exist"), &250);
+    assert_eq!(rust_results.get("11").expect("Key should exist"), &251);
+  }
+
+  #[test]
+  fn no_args() {
+    Python::with_gil(|py| {
+      let relative_path = canonicalize("../../tests/files/qir/generator-bell.ll").unwrap();
+      let path = relative_path.to_str().unwrap();
+
+      let adaptor_file = include_str!("../../tests/rust_python_integration.py");
+      let builder = python_from(py, adaptor_file, "BuilderAdaptor");
+      let runtime = python_from(py, adaptor_file, "RuntimeAdaptor");
+
+      let walker = Executor::new();
+      let results = walker.run(path, builder, runtime);
+      assert_default_results(py, results);
+    });
+  }
+
+  #[test]
+  fn with_args() {
+    Python::with_gil(|py| {
+      let relative_path = canonicalize("../../tests/files/qir/generator-bell.ll").unwrap();
+      let path = relative_path.to_str().unwrap();
+
+      let adaptor_file = include_str!("../../tests/rust_python_integration.py");
+      let builder = python_from(py, adaptor_file, "BuilderAdaptor");
+      let runtime = python_from(py, adaptor_file, "RuntimeAdaptor");
+      let args = python_from(py, adaptor_file, "build_args");
+
+      let walker = Executor::new();
+      let results = walker.run_with_args(path, args, builder, runtime);
+      assert_default_results(py, results);
+    });
+  }
+
+  #[test]
+  fn invalid_args() {
+    Python::with_gil(|py| {
+      let relative_path = canonicalize("../../tests/files/qir/generator-bell.ll").unwrap();
+      let path = relative_path.to_str().unwrap();
+
+      let adaptor_file = include_str!("../../tests/rust_python_integration.py");
+      let builder = python_from(py, adaptor_file, "BuilderAdaptor");
+      let runtime = python_from(py, adaptor_file, "RuntimeAdaptor");
+      let args = python_from(py, adaptor_file, "build_invalid_args");
+
+      let walker = Executor::new();
+      walker.run_with_args(path, args, builder, runtime)
+        .expect_err("Invalid args passed, should error.");
+    });
+  }
+
+  #[test]
+  fn parse_graph() {
+    Python::with_gil(|py| {
+      let relative_path = canonicalize("../../tests/files/qir/generator-bell.ll").unwrap();
+      let path = relative_path.to_str().unwrap();
+
+      let walker = Executor::new();
+      let parsed_graph = walker.parse_file(path, None).expect("Unable to parse graph.");
+    });
+  }
+
+  #[test]
+  fn parse_and_execute() {
+    Python::with_gil(|py| {
+      let relative_path = canonicalize("../../tests/files/qir/generator-bell.ll").unwrap();
+      let path = relative_path.to_str().unwrap();
+
+      let adaptor_file = include_str!("../../tests/rust_python_integration.py");
+      let builder = python_from(py, adaptor_file, "BuilderAdaptor");
+      let runtime = python_from(py, adaptor_file, "RuntimeAdaptor");
+
+      let walker = Executor::new();
+      let parsed_graph = walker.parse_file(path, None);
+      let results = walker.run_graph(
+        parsed_graph.expect("Graph should be parsable"), PyList::empty(py), builder, runtime);
+
+      assert_default_results(py, results);
+    });
+  }
+}
diff --git a/src/munchkin/pykin/src/runtime.rs b/src/munchkin/pykin/src/runtime.rs
new file mode 100644
index 0000000..2cacf60
--- /dev/null
+++ b/src/munchkin/pykin/src/runtime.rs
@@ -0,0 +1,984 @@
+use std::borrow::{Borrow, BorrowMut};
+use std::collections::{HashMap, HashSet, VecDeque};
+use std::fmt::{Display, Formatter};
+use std::ops::{Deref, DerefMut};
+use bitflags::bitflags;
+use log::{Level, log};
+use crate::analysis::{QuantumOperations, QuantumProjection};
+use crate::with_mutable;
+use crate::evaluator::EvaluationContext;
+use crate::execution::EngineCollection;
+use crate::graphs::{AnalysisGraph, walk_logical_paths, Node, ExecutableAnalysisGraph};
+use crate::hardware::Qubit;
+use crate::instructions::{Condition, Equalities, Expression, Gate, Instruction, LambdaModifier, Operator, Pauli, Value};
+use crate::smart_pointers::*;
+
+/// Assign an order to nodes so we can trivially tell whether one sits further along
+/// the graph than another.
+fn order_nodes(graph: &Ptr<AnalysisGraph>) {
+  let mut inc = 0;
+  for mut node in walk_logical_paths(graph) {
+    node.order = Some(inc);
+    inc += 1;
+  }
+}
+
+/// Scope variable lifetimes to their loops and branches. Used to be able to detect when
+/// we can reset variables per-loop.
+fn scope_variables(graph: &Ptr<AnalysisGraph>, context: &Ptr<RuntimeContext>) {
+  let mut guard = HashSet::new();
+  scope_variables_rec(graph, context, &mut guard);
+}
+
+fn scope_variables_rec(graph: &Ptr<AnalysisGraph>, context: &Ptr<RuntimeContext>, guard: &mut HashSet<String>) {
+  if guard.contains(&graph.identity) {
+    return
+  }
+
+  guard.insert(graph.identity.clone());
+
+  with_mutable!(context.scopes.insert(graph.identity.clone(), Ptr::from(HashMap::new())));
+  let mut active_scopes = VecDeque::new();
+  for node in walk_logical_paths(graph) {
+    let node_order = node.order.expect("Should be ordered");
+    let inc_nodes = node.incoming_nodes();
+    let backward_jumps = inc_nodes.iter()
+      .filter(|val| val.1.order.expect("Should be ordered") > node_order)
+      .map(|val| &val.1)
+      .collect::<Vec<_>>();
+    if !backward_jumps.is_empty() {
+      for jmp in backward_jumps {
+        let mut var_scope = VariableScopes::new();
+        var_scope.start = node.order.expect("Need order to scope.");
+        var_scope.end = jmp.order.expect("Need order to scope.");
+        active_scopes.push_back(var_scope);
+      }
+    }
+
+    if !active_scopes.is_empty() {
+      match node.instruction.deref() {
+        Instruction::Assign(var, _) |
+        Instruction::Arithmatic(var, _, _, _) |
+        Instruction::Condition(var, _) => {
+          for scope in active_scopes.iter_mut() {
+            scope.captured_variables.insert(var.clone());
+          }
+        }
+        Instruction::Expression(_, var_opt) |
+        Instruction::Subgraph(_, var_opt) => {
+          if let Some(var) = var_opt {
+            for scope in active_scopes.iter_mut() {
+              scope.captured_variables.insert(var.clone());
+            }
+          }
+        }
+        Instruction::NoOp |
+        Instruction::Initialize() |
+        Instruction::Reset(_) |
+        Instruction::ActivateQubit(_, _) |
+        Instruction::DeactivateQubit(_) |
+        Instruction::Gate(_) |
+        Instruction::Return(_) |
+        Instruction::Label(_) |
+        Instruction::Throw(_) |
+        Instruction::Log(_) => {}
+      }
+    }
+  }
+
+  if let Some(results) = context.scopes.get(&graph.identity) {
+    for scopes in active_scopes {
+      if !scopes.captured_variables.is_empty() {
+        with_mutable!(results.insert(scopes.start, scopes));
+      }
+    }
+  }
+}
+
+/// Get the next node that this graph is going to execute against.
+fn get_next_node(current_node: &mut Ptr<Node>, context: &Ptr<RuntimeContext>) -> Ptr<Node> {
+  // We look at conditional paths first, see if any have been activated.
+  let mut outgoing_ifs = current_node.outgoing_conditional_nodes();
+  let mut conditional_path = outgoing_ifs
+    .iter_mut()
+    .filter(|val| check_condition(val.0.conditions.as_ref().unwrap().borrow(), context))
+    .collect::<Vec<_>>();
+  let next_target = match conditional_path.first_mut() {
+    None => current_node.next_node().expect("Has to have some value."),
+    Some(val) => (val.0.clone(), val.1.clone())
+  };
+
+  // Do any value assignments if necessary.
+  match next_target.0.assignments.as_ref() {
+    Some(assignments) => {
+      for (assign, value) in assignments {
+        // If we're assigning the same value, skip the assignment.
+        // TODO: Move to evaluator? Probably.
+        if let Value::Ref(ref_, addition) = value {
+          if assign == ref_ && addition.is_none() {
+            continue
+          }
+        }
+
+        // We do a deep copy so the pointer doesn't get modified, but we then also follow
+        // the reference because either we're jumping forward or back - and in both cases
+        // the old value won't be needed.
+        // TODO: I don't like this, need more solid ruling like checking for backward
+        //       jumps.
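+        // e.g. an edge assignment `x = y` copies the value behind `y` here, while a
+        // plain self-assignment `x = x` was already skipped above (illustrative
+        // variable names only).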
+        with_mutable!(context.add(assign, &follow_reference(&Ptr::from(value.clone()), context)));
+      }
+    }, _ => {}
+  }
+
+  next_target.1.clone()
+}
+
+pub fn check_condition(cond: &Condition, context: &Ptr<RuntimeContext>) -> bool {
+  let left = follow_reference(&Ptr::from(cond.left.clone()), context);
+  let right = follow_reference(&Ptr::from(cond.right.clone()), context);
+
+  match cond.equality {
+    Equalities::Equals => left.deref() == right.deref(),
+    Equalities::NotEquals => left.deref() != right.deref(),
+    Equalities::GreaterThan => left.deref() > right.deref(),
+    Equalities::LessThan => left.deref() < right.deref(),
+    Equalities::GreaterOrEqualThan => left.deref() >= right.deref(),
+    Equalities::LessOrEqualThan => left.deref() <= right.deref()
+  }
+}
+
+/// Follows a Value::Ref to the value it's actually pointing at, which includes delving into arrays.
+fn follow_reference(qx: &Ptr<Value>, context: &Ptr<RuntimeContext>) -> Ptr<Value> {
+  match qx.deref() {
+    Value::Ref(ref_, additional) => {
+      let fetched_ref = context.get(ref_).expect(format!("Dangling variable: {}.", ref_.clone()).as_str());
+
+      // Guard against direct self-reference, which would never resolve.
+      if let Value::Ref(target, target_add) = fetched_ref.deref() {
+        if ref_ == target && additional == target_add {
+          panic!("Circular reference found: {}.", fetched_ref.to_string())
+        }
+      }
+
+      let mut value = follow_reference(&fetched_ref, context);
+      additional.as_ref().map_or(value.clone(), |indexer| {
+        match value.deref_mut() {
+          Value::Array(array) => {
+            let index = follow_reference(indexer.borrow(), context).as_int() as usize;
+            let length = array.len();
+            if index >= length {
+              // Reserving alone doesn't extend the vector, so grow it with empty
+              // values until the index is actually addressable.
+              array.resize_with(index + 1, || Ptr::from(Value::Empty));
+            }
+
+            array.get_mut(index).expect("Array should contain index after resize.").clone()
+          }
+          _ => panic!("Tried indexer on value that wasn't an array: {}.", value.to_string())
+        }
+      })
+    },
+    _ => qx.clone()
+  }
+}
+
+// TODO: Make return optional.
+
+impl Expression {
+  pub fn execute(&self, context: &Ptr<RuntimeContext>) -> Ptr<Value> {
+    match self {
+      Expression::Clone(value) => {
+        follow_reference(&Ptr::from(value), context).clone()
+      }
+      Expression::Length(value) => {
+        let followed_ref = follow_reference(&Ptr::from(value), context);
+        let array = followed_ref.as_array();
+        Ptr::from(Value::Int(array.len() as i64))
+      }
+      Expression::NegateSign(value) => {
+        let followed = follow_reference(&Ptr::from(value), context);
+        Ptr::from(match followed.deref() {
+          Value::Byte(b) => Value::Byte(-*b),
+          Value::Short(s) => Value::Short(-*s),
+          Value::Int(i) => Value::Int(-*i),
+          Value::Long(l) => Value::Long(-*l),
+          Value::Float(f) => Value::Float(-*f),
+          _ => panic!("Can't negate sign of {}", followed.to_string())
+        })
+      },
+      Expression::Stringify(value) => {
+        let stringified_value = follow_reference(&Ptr::from(value.clone()), context).to_string();
+        Ptr::from(Value::String(stringified_value))
+      },
+      Expression::ArgInjection(target, args) => {
+        // Swap the empty pointer for our callable and attach dynamic arguments.
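+        // e.g. injecting a tuple into a partially-applied callable: the arguments
+        // land under the reserved "%arg-tuple" name below and get picked up when
+        // the subgraph eventually executes.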
+        let mut follow = follow_reference(&Ptr::from(target), context).as_callable();
+        if let Some(args) = args {
+          follow.argument_mappings.insert("%arg-tuple".to_string(), Ptr::from(args.clone()));
+        }
+
+        Ptr::from(target.clone())
+      },
+      Expression::MakeCtrlAdj(val, modifier) => {
+        let mut graph = follow_reference(&Ptr::from(val), context).as_callable();
+        let id = graph.analysis_graph.identity.as_str();
+
+        let is_controlled = id.ends_with("ctl__wrapper");
+        let is_adj = id.ends_with("adj__wrapper");
+
+        let slimmed_id = id.trim_end_matches("__ctl__wrapper").trim_end_matches("__adj__wrapper");
+
+        let suffix = match modifier {
+          LambdaModifier::Ctl => if is_adj { "__ctladj__wrapper" } else { "__ctl__wrapper" },
+          LambdaModifier::Adj => if is_controlled { "__ctladj__wrapper" } else { "__adj__wrapper" }
+        };
+
+        // If we have a wrapper graph under the modified name, swap it in.
+        if let Some(new_graph) = context.method_graphs.get(&format!("{slimmed_id}{suffix}")) {
+          graph.analysis_graph = new_graph.clone();
+        } else {
+          panic!("Attempted swapping graph out for adjoint or controlled version, can't find wrapper.");
+        }
+
+        Ptr::from(val.clone())
+      }
+    }
+  }
+}
+
+bitflags! {
+  /// Flags enabling various runtime tracing operations. Turning these on will drastically
+  /// affect performance and should only be done to debug output and issues.
+  #[derive(Clone)]
+  pub struct ActiveTracers: u8 {
+    const Runtime = 1;
+    const Projections = 1 << 1;
+    const Graphs = 1 << 2;
+  }
+}
+
+/// Tracing module for the runtime, for in-depth detailed logging of how our runtime functions.
+pub struct TracingModule {
+  pub tracers: ActiveTracers
+}
+
+impl TracingModule {
+  pub fn new() -> TracingModule {
+    TracingModule { tracers: ActiveTracers::empty() }
+  }
+
+  pub fn with(tracers: ActiveTracers) -> TracingModule {
+    TracingModule { tracers: tracers.clone() }
+  }
+
+  pub fn is_active(&self) -> bool {
+    !self.tracers.is_empty()
+  }
+
+  pub fn has(&self, check_against: ActiveTracers) -> bool {
+    self.tracers.contains(check_against)
+  }
+}
+
+pub struct QuantumRuntime {
+  engines: Ptr<EngineCollection>,
+  trace_module: Ptr<TracingModule>
+}
+
+/// A runtime monitors, executes and maintains a cluster of graphs against the backend instances
+/// it currently has available.
+impl QuantumRuntime {
+  pub fn new(engines: &Ptr<EngineCollection>, tracer: ActiveTracers) -> QuantumRuntime {
+    QuantumRuntime {
+      engines: engines.clone(),
+      trace_module: Ptr::from(TracingModule::with(tracer))
+    }
+  }
+
+  /// Helper method since all checks are on one flag right now.
+  fn is_tracing(&self) -> bool {
+    self.trace_module.has(ActiveTracers::Runtime)
+  }
+
+  /// Executes the passed-in graph against this runtime.
+  pub fn execute(&mut self, exe_graph: &Ptr<ExecutableAnalysisGraph>, arguments: &Vec<Value>) -> Result<Option<Ptr<Value>>, String> {
+    let mut context = exe_graph.context.attach_runtime(&Ptr::from(self.borrow_mut()));
+
+    // Assign the initial arguments going in. Just treat it like a normal method call based
+    // on ordinal positioning. We don't really need the input to include names.
+    if exe_graph.callable_graph.argument_mappings.len() != arguments.len() {
+      let mut required_arguments = exe_graph.callable_graph.argument_mappings.keys()
+        .map(|val| val.clone()).collect::<Vec<String>>().join(", ");
+
+      if required_arguments.is_empty() {
+        required_arguments = String::from("no");
+      }
+
+      let mut supplied_arguments = arguments.iter().map(|val| val.to_string()).collect::<Vec<String>>().join(", ");
+      if supplied_arguments.is_empty() {
+        supplied_arguments = String::from("none");
+      }
+
+      panic!("Root graph requires {required_arguments} arguments to execute. Was given: {supplied_arguments}.")
+    }
+
+    let mut index = 0_usize;
+    for (key, _) in exe_graph.callable_graph.argument_mappings.iter() {
+      if let Some(value) = arguments.get(index) {
+        context.add(key, &Ptr::from(value));
+      }
+      index += 1;
+    }
+
+    // Loop through active graphs in this execution and perform pre-execution analysis.
+    // TODO: Should do this outside the executor, probably.
+    for subgraph in context.method_graphs.values() {
+      order_nodes(subgraph.borrow());
+      scope_variables(subgraph.borrow(), &context);
+    }
+
+    if self.trace_module.has(ActiveTracers::Graphs) {
+      log!(Level::Info, "Currently executing graph:\n{}", exe_graph);
+    }
+
+    self._execute(exe_graph.callable_graph.analysis_graph.borrow(), &mut context).map(|val| {
+      if val.is_none() {
+        return None;
+      }
+
+      let val = follow_reference(&val.unwrap(), &context);
+      Some(match val.deref() {
+        Value::QuantumPromise(qbs, proj) => {
+          Ptr::from(Value::AnalysisResult(Ptr::from(with_mutable!(proj.results_for(qbs)))))
+        }
+        Value::Array(arr) => Ptr::from(Value::Array(arr.iter()
+          .map(|val| follow_reference(val, &context))
+          .map(|val| {
+            match val.deref() {
+              Value::QuantumPromise(qbs, proj) => {
+                Ptr::from(Value::AnalysisResult(Ptr::from(with_mutable!(proj.results_for(qbs)))))
+              }
+              _ => val.clone()
+            }})
+          .collect::<Vec<_>>())),
+        _ => val.clone()
+      })
+    })
+  }
+
+  fn _execute(&mut self, graph: &Ptr<AnalysisGraph>, context: &mut Ptr<RuntimeContext>) -> Result<Option<Ptr<Value>>, String> {
+    let mut entry_points = graph.entry_points();
+    let starting_point = entry_points.first_mut();
+    if starting_point.is_none() {
+      return Err(String::from("No entry-point available."));
+    }
+
+    let mut current_node = starting_point.unwrap().clone();
+
+    fn follow_qubit(val: &Ptr<Value>, context: &mut Ptr<RuntimeContext>) -> Qubit {
+      follow_reference(val, context).as_qubit().clone()
+    }
+
+    fn follow_float(val: &Ptr<Value>, context: &mut Ptr<RuntimeContext>) -> f64 {
+      follow_reference(val, context).as_float()
+    }
+
+    // Endlessly loop until the graph completes execution.
+    // TODO: Need infinite loop check-and-break.
+    let mut old_variables: HashMap<String, Ptr<Value>> = HashMap::new();
+    let mut available_scopes = with_mutable!(context.scopes.get_mut(&graph.identity));
+    let mut seen_nodes = HashSet::new();
+    loop {
+      if self.is_tracing() {
+        let mut changed_variables = Vec::new();
+        let mut updated_variables = HashMap::new();
+        for (key, value) in context.variables.iter() {
+          // We need to follow references to check for value differences.
+          let followed_value = follow_reference(value, context);
+          let changed = if let Some(old_var) = old_variables.get(key) {
+            old_var != &followed_value
+          } else {
+            true
+          };
+
+          if changed {
+            changed_variables.push(format!("({} = {})", key.clone(), followed_value.to_string()));
+          }
+
+          // Arrays copy their pointers so get updated in-line. Need a full copy to check
+          // for differences, so enforce that here.
+          let copied_value = match followed_value.deref() {
+            Value::Array(arr) => {
+              Ptr::from(Value::Array(arr.iter().map(|val| val.clone_inner()).collect::<Vec<_>>()))
+            },
+            _ => value.clone_inner()
+          };
+
+          updated_variables.insert(key.clone(), copied_value);
+        }
+
+        old_variables = updated_variables;
+        log!(Level::Info, "{} :: {}", current_node.to_string().as_str(), changed_variables.join(", "));
+      }
+
+      let node_id = current_node.id();
+      if let Some(scopes) = available_scopes.as_mut() {
+        if let Some(scope) = scopes.get_mut(&current_node.order.expect("Node ordering required.")) {
+          // First time through, dynamically remove assignments from the list which are
+          // external. This simplifies the previous scope analysis, moving this to the
+          // runtime instead.
+          if !seen_nodes.contains(&node_id) {
+            for key in context.globals.keys() {
+              scope.captured_variables.remove(key);
+            }
+
+            for key in context.variables.keys() {
+              scope.captured_variables.remove(key);
+            }
+          } else {
+            // Else we've looped, so remove the scoped variables we already know about.
+            if self.is_tracing() {
+              log!(Level::Info, "Looped, resetting [{}]", scope.captured_variables.iter().map(|val| val.as_str()).collect::<Vec<_>>().join(", "))
+            }
+
+            for key in scope.captured_variables.iter() {
+              context.variables.remove(key);
+            }
+          }
+        }
+      }
+
+      seen_nodes.insert(node_id);
+
+      let instruction = &current_node.instruction;
+      match instruction.deref() {
+        Instruction::Return(results) => {
+          return Ok(Some(follow_reference(results.borrow(), context)));
+        }
+        Instruction::Label(_) => { }
+        Instruction::Throw(message) => {
+          return Err(message.as_ref().map_or("Unknown exception.".to_string(), |val|
+            follow_reference(&Ptr::from(val), context).as_string()));
+        }
+        Instruction::Log(message) => {
+          let followed = follow_reference(message, context);
+          let message = match followed.deref() {
+            Value::QuantumPromise(qb, proj) => {
+              // We concretize a promise if we see it being logged.
+              with_mutable!(proj.results_for(qb).to_string())
+            }
+            _ => followed.to_string()
+          };
+
+          // Trim since we're just logging per-line.
+          log!(Level::Info, "{}", message.trim());
+        }
+        Instruction::Subgraph(subgraph, var) => {
+          let subgraph = follow_reference(subgraph, context).as_callable();
+          let mut subcontext = Ptr::from(context.create_subcontext());
+          for (arg, value) in subgraph.argument_mappings.iter() {
+            // Need to deep-clone as the value sticks around in the Graph shell.
+            subcontext.variables.insert(
+              arg.clone(), follow_reference(value, context).clone());
+          }
+
+          if self.is_tracing() {
+            log!(Level::Info, "");
+            log!(Level::Info, "{} -->", subgraph.analysis_graph.identity);
+          }
+
+          let results = self._execute(subgraph.analysis_graph.borrow(), subcontext.borrow_mut())?;
+          if let Some(target) = var {
+            let results = results.map_or(
+              Ptr::from(Value::Empty),
+              |val| val.clone()
+            );
+            with_mutable!(context.add(target, results.borrow()));
+          }
+
+          if self.is_tracing() {
+            log!(Level::Info, "");
+            log!(Level::Info, "{} <--", graph.identity)
+          }
+        }
+        Instruction::Assign(variable, val) => {
+          // TODO: Move argument deep clone to a more centralized place. Right now assignment
+          //       is assumed to be the only way values are created, but there's no reason
+          //       they can't also be in-line.
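+          // Why the deep clone matters: arrays share their backing pointer (as noted in
+          // the tracing block above), so without `clone_inner` an assignment like `b = a`
+          // followed by a write to `b[0]` would also mutate `a` (illustrative names only).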
+          let cloned_value = val.clone_inner();
+          if let Value::Callable(callable) = cloned_value.deref() {
+            // Collect the keys up-front so we don't mutate the map while iterating it.
+            let keys = callable.argument_mappings.keys().cloned().collect::<Vec<_>>();
+            for key in keys {
+              with_mutable!(callable.argument_mappings.insert(key.clone(), follow_reference(callable.argument_mappings.get(&key).unwrap(), context)));
+            }
+          }
+
+          let followed = follow_reference(&cloned_value, context);
+          with_mutable!(context.add(variable, followed.borrow()));
+        }
+        Instruction::Arithmatic(var, left, op, right) => {
+          let left = follow_reference(left, context);
+          let right = follow_reference(right, context);
+
+          let result = Ptr::from(match op {
+            Operator::Multiply => left.deref() * right.deref(),
+            Operator::Divide => left.deref() / right.deref(),
+            Operator::Add => left.deref() + right.deref(),
+            Operator::Subtract => left.deref() - right.deref(),
+            Operator::Or => left.deref() | right.deref(),
+            Operator::And => left.deref() & right.deref(),
+            Operator::Xor => left.deref() ^ right.deref(),
+          });
+
+          with_mutable!(context.add(var, result.borrow()));
+        }
+        Instruction::Condition(var, condition) => {
+          let result = Ptr::from(Value::Bool(check_condition(condition.borrow(), context)));
+          with_mutable!(context.add(var, result.borrow()));
+        }
+        Instruction::ActivateQubit(var, opt_length) => {
+          if let Some(length) = opt_length {
+            let mut qubit_vec = Vec::new();
+            for _ in 0..follow_reference(length, context).as_int() {
+              let new_qubit = context.activate_qubit();
+              qubit_vec.push(Ptr::from(Value::Qubit(new_qubit.deref().clone())));
+            }
+
+            with_mutable!(context.add(var, &Ptr::from(Value::Array(qubit_vec))));
+          } else {
+            let new_qubit = context.activate_qubit();
+            with_mutable!(context.add(var, &Ptr::from(Value::Qubit(new_qubit.deref().clone()))));
+          }
+        }
+        Instruction::DeactivateQubit(qb) => {
+          let qb = follow_reference(qb, context);
+          match qb.deref() {
+            Value::Qubit(qb) => {
+              let mut current_projection = context.activate_projection(&qb);
+              current_projection.add(&Ptr::from(QuantumOperations::Reset(vec![qb.clone()])));
+              context.deactivate_qubit(&qb);
+            }
+            Value::Array(array) => {
+              for value in array.iter() {
+                let followed = follow_reference(value, context);
+                let qubit = followed.as_qubit();
+
+                let mut current_projection = context.activate_projection(qubit);
+                current_projection.add(&Ptr::from(QuantumOperations::Reset(vec![qubit.clone()])));
+                context.deactivate_qubit(qubit);
+              }
+            },
+            _ => panic!("Not a qubit or an array of them. Can't deactivate.")
+          }
+        }
+        Instruction::Reset(qb) => {
+          let qb = qb.as_qubit();
+          let proj = context.activate_projection(&qb);
+
+          with_mutable!(proj.add(&Ptr::from(QuantumOperations::Reset(vec![qb.clone()]))));
+        }
+        Instruction::Gate(gate) => {
+          match gate.deref() {
+            Gate::I(qb) => {
+              let followed = follow_qubit(qb, context);
+              let mut projection = context.activate_projection(&followed);
+              projection.add(&Ptr::from(QuantumOperations::I(followed.clone())));
+            }
+            Gate::U(qb, theta, phi, lambda) => {
+              let followed = follow_qubit(qb, context);
+              let mut projection = context.activate_projection(&followed);
+              projection.add(
+                &Ptr::from(
+                  QuantumOperations::U(
+                    followed.clone(),
+                    follow_float(theta, context),
+                    follow_float(phi, context),
+                    follow_float(lambda, context)
+                  )));
+            }
+            Gate::R(pauli, qubit, rot) => {
+              let followed = follow_qubit(qubit, context).clone();
+              let radii = follow_float(rot, context);
+              let mut projection = context.activate_projection(&followed);
+
+              match follow_reference(pauli, context).as_pauli() {
+                Pauli::I => {
+                  projection.add(&Ptr::from(QuantumOperations::I(followed)));
+                }
+                Pauli::X => {
+                  projection.add(&Ptr::from(QuantumOperations::X(followed, radii)));
+                }
+                Pauli::Z => {
+                  projection.add(&Ptr::from(QuantumOperations::Z(followed, radii)));
+                }
+                Pauli::Y => {
+                  projection.add(&Ptr::from(QuantumOperations::Y(followed, radii)));
+                }
+              }
+            }
+            Gate::X(qb, radii) => {
+              let followed = follow_qubit(qb, context);
+              let mut projection = context.activate_projection(&followed);
+              let radii = follow_float(radii, context);
+              projection.add(&Ptr::from(QuantumOperations::X(followed.clone(), radii)));
+            }
+            Gate::Y(qb, radii) => {
+              let followed = follow_qubit(qb, context);
+              let mut projection = context.activate_projection(&followed);
+              let radii = follow_float(radii, context);
+              projection.add(&Ptr::from(QuantumOperations::Y(followed.clone(), radii)));
+            }
+            Gate::Z(qb, radii) => {
+              let followed = follow_qubit(qb, context);
+              let mut projection = context.activate_projection(&followed);
+              let radii = follow_float(radii, context);
+              projection.add(&Ptr::from(QuantumOperations::Z(followed.clone(), radii)));
+            },
+            Gate::CR(pauli, controls, target, rotation) => {
+              let pauli = follow_reference(pauli, context).as_pauli();
+              let qubit = follow_qubit(target, context).clone();
+              let rotation = follow_float(rotation, context);
+              let controls = match follow_reference(controls, context).deref() {
+                Value::Qubit(qb) => vec![qb.clone()],
+                Value::Array(arr) => arr.iter().map(|val| follow_qubit(val, context).clone()).collect(),
+                _ => Vec::new()
+              };
+
+              let mut projection = context.activate_projection(&qubit);
+              match pauli {
+                Pauli::I => {}
+                Pauli::X => {
+                  projection.add(
+                    &Ptr::from(
+                      QuantumOperations::CX(
+                        controls,
+                        qubit,
+                        rotation
+                      )));
+                }
+                Pauli::Z => {
+                  projection.add(
+                    &Ptr::from(
+                      QuantumOperations::CZ(
+                        controls,
+                        qubit,
+                        rotation
+                      )));
+                }
+                Pauli::Y => {
+                  projection.add(
+                    &Ptr::from(
+                      QuantumOperations::CY(
+                        controls,
+                        qubit,
+                        rotation
+                      )));
+                }
+              }
+            }
+            Gate::CX(control, target, radii) => {
+              // TODO: Multi qubit activation, deal with.
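+              // The control operand may be a single qubit or an array of them; both
+              // shapes collapse to a Vec of controls below, and anything else
+              // degrades to an empty control set.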
+ let followed = follow_qubit(target, context); + let rotation = follow_float(radii, context); + let mut projection = context.activate_projection(&followed); + let controls = match follow_reference(control, context).deref() { + Value::Qubit(qb) => vec![qb.clone()], + Value::Array(arr) => arr.iter().map(|val| follow_qubit(val, context).clone()).collect(), + _ => Vec::new() + }; + + projection.add( + &Ptr::from( + QuantumOperations::CX( + controls, + followed.clone(), + rotation + ))); + } + Gate::CZ(control, target, radii) => { + // TODO: Multi qubit activation, deal with. + let followed = follow_qubit(target, context); + let rotation = follow_float(radii, context); + let mut projection = context.activate_projection(&followed); + let controls = match follow_reference(control, context).deref() { + Value::Qubit(qb) => vec![qb.clone()], + Value::Array(arr) => arr.iter().map(|val| follow_qubit(val, context).clone()).collect(), + _ => Vec::new() + }; + + projection.add( + &Ptr::from( + QuantumOperations::CZ( + controls, + followed.clone(), rotation + ))); + } + Gate::CY(control, target, radii) => { + // TODO: Multi qubit activation, deal with. + let followed = follow_qubit(target, context); + let rotation = follow_float(radii, context); + let mut projection = context.activate_projection(&followed); + let controls = match follow_reference(control, context).deref() { + Value::Qubit(qb) => vec![qb.clone()], + Value::Array(arr) => arr.iter().map(|val| follow_qubit(val, context).clone()).collect(), + _ => Vec::new() + }; + + projection.add( + &Ptr::from( + QuantumOperations::CY( + controls, + followed.clone(), rotation + ))); + } + Gate::Measure(pauli, qbs, var) => { + let qubits = match follow_reference(qbs, context).deref() { + Value::Qubit(qb) => {vec![qb.clone()]} + Value::Array(array) => { + array.iter().map(|val| follow_reference(val, context).as_qubit().clone()).collect::>() + } + _ => panic!("Invalid qubit.") + }; + + let mut projection = context.activate_projection( + &qubits.iter().next().expect("Should have at least one qubit to measure.")); + projection.add(&Ptr::from(QuantumOperations::Measure(qubits.clone()))); + + let paulis = match follow_reference(pauli, context).deref() { + Value::Array(array) => { + array.iter().map(|val| follow_reference(val, context).as_pauli()).collect::>() + }, + val => vec![val.as_pauli()] + }; + + let promise = Ptr::from( + Value::QuantumPromise(qubits, projection.clone())); + + let followed_var = follow_reference(var, context); + let variable = if let Value::String(val) = followed_var.deref() { + val.clone() + } else { + format!("%cr_{}", followed_var.to_string()) + }; + + with_mutable!(context.add(&variable, promise.borrow())) + } + } + } + Instruction::Expression(expr, assign) => { + let result = expr.execute(context); + if let Some(variable) = assign { + with_mutable!(context.add(variable, result.borrow())); + } + } + + // Purposefully empty. + Instruction::NoOp | + Instruction::Initialize() => {} + } + + // If our node has no outward edges, we've finished. + if current_node.is_exit_node() { + break; + } + + let next_node = get_next_node(current_node.clone().borrow_mut(), context); + current_node = next_node; + } + + // Base profiles are a special case with no data-flow and just random operations that + // get picked up magically. In this case just return the global projection and force + // full qubit results. 
+    if context.is_base_profile {
+      if let Some(projection) = context.projections.values_mut().next() {
+        return Ok(Some(Ptr::from(Value::AnalysisResult(Ptr::from(projection.results())))));
+      }
+    }
+
+    // If we haven't hit a return instruction, it's a method with no return.
+    Ok(None)
+  }
+}
+
+pub struct VariableScopes {
+  captured_variables: HashSet<String>,
+
+  /// The start/end nodes of this particular scoping.
+  /// If we hit the start again we reset the assigned variables.
+  start: i64,
+  end: i64,
+}
+
+impl VariableScopes {
+  pub fn new() -> VariableScopes {
+    VariableScopes {
+      captured_variables: HashSet::new(),
+      start: 0,
+      end: 0,
+    }
+  }
+}
+
+impl Display for VariableScopes {
+  fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+    f.write_str(format!("at {} with {}", self.start, self.captured_variables.iter().map(|val| val.as_str()).collect::<Vec<_>>().join(", ")).as_str())
+  }
+}
+
+pub struct RuntimeContext {
+  pub globals: Ptr<HashMap<String, Ptr<Value>>>,
+  pub variables: HashMap<String, Ptr<Value>>,
+  pub method_graphs: Ptr>>,
+  pub active_qubits: Ptr<HashMap<i64, Ptr<Qubit>>>,
+  pub is_base_profile: bool,
+
+  // TODO: Don't like this being everywhere, but it is a core object.
+  // Potentially change this back to POD object.
+  pub associated_runtime: Ptr,
+
+  /// Map graph ID to variable scopings.
+  // TODO: Assign scopes to execution graphs, now we've split them.
+  pub scopes: Ptr>>>,
+
+  projections: Ptr<HashMap<i64, Ptr<QuantumProjection>>>
+}
+
+// TODO: Might want to split the concept of constant runtime data and variable data. The evaluated
+// data is relatively constant.
+
+impl RuntimeContext {
+  pub fn new() -> RuntimeContext {
+    RuntimeContext {
+      globals: Ptr::from(HashMap::new()),
+      variables: HashMap::default(),
+      projections: Ptr::from(HashMap::new()),
+      active_qubits: Ptr::from(HashMap::new()),
+      scopes: Ptr::from(HashMap::new()),
+      method_graphs: Ptr::from(HashMap::new()),
+      associated_runtime: Ptr::default(),
+      is_base_profile: false,
+    }
+  }
+
+  pub fn from_evaluation(context: &Ptr) -> RuntimeContext {
+    RuntimeContext {
+      globals: context.global_variables.clone_inner(),
+      variables: HashMap::default(),
+      projections: Ptr::from(HashMap::new()),
+      active_qubits: Ptr::from(HashMap::new()),
+      scopes: Ptr::from(HashMap::new()),
+      method_graphs: context.method_graphs.clone(),
+      associated_runtime: Ptr::default(),
+      is_base_profile: context.is_base_profile.deref().clone()
+    }
+  }
+
+  pub fn create_subcontext(&self) -> RuntimeContext {
+    RuntimeContext {
+      globals: self.globals.clone(),
+      variables: HashMap::default(),
+      projections: self.projections.clone(),
+      active_qubits: self.active_qubits.clone(),
+      scopes: self.scopes.clone(),
+      method_graphs: self.method_graphs.clone(),
+      associated_runtime: self.associated_runtime.clone(),
+      is_base_profile: self.is_base_profile.clone()
+    }
+  }
+
+  /// Create new subcontext associated with runtime.
+  pub fn attach_runtime(&self, runtime: &Ptr) -> Ptr<RuntimeContext> {
+    let mut new_context = self.create_subcontext();
+    new_context.associated_runtime = runtime.clone();
+    Ptr::from(new_context)
+  }
+
+  fn get_free_qubit(&mut self) -> Ptr<Qubit> {
+    // TODO: Brute-force so needs improvement but finding qubit gaps is important.
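+    // Linear scan from zero for the first unused index so that indices freed by
+    // release_qubit are reused before new ones are handed out.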
+    let mut inc: i64 = 0;
+    while self.active_qubits.contains_key(&inc) {
+      inc += 1;
+    }
+
+    let new_qubit = Ptr::from(Qubit::new(inc));
+    self.active_qubits.insert(inc, new_qubit.clone());
+    new_qubit
+  }
+
+  fn release_qubit(&mut self, qb: &Qubit) {
+    if let Some(qb) = self.active_qubits.get(&qb.index) {
+      let qbs = &self.active_qubits;
+      with_mutable!(qbs.remove(&qb.index));
+    }
+  }
+
+  pub fn add(&mut self, var: &String, val: &Ptr<Value>) {
+    if let Some(existing) = self.variables.get_mut(var) {
+      existing.expand_into(val);
+    } else {
+      self.variables.insert(var.clone(), val.clone());
+    }
+  }
+
+  pub fn has(&mut self, var: &String) -> bool {
+    self.variables.contains_key(var)
+  }
+
+  pub fn remove(&mut self, var: &String) {
+    self.variables.remove(var);
+  }
+
+  pub fn get(&self, var: &String) -> Option<Ptr<Value>> {
+    self.variables.get(var.as_str()).map_or_else(|| {
+      self.globals.get(var.as_str()).map_or(None, |val| Some(val.clone_inner()))
+    }, |val| Some(val.clone()))
+  }
+
+  pub fn activate_qubit(&mut self) -> Ptr<Qubit> {
+    let new_qubit = self.get_free_qubit();
+    self.activate_projection(&new_qubit);
+    new_qubit
+  }
+
+  pub fn deactivate_qubit(&mut self, qb: &Qubit) {
+    self.release_qubit(qb);
+    self.deactivate_projection(qb);
+  }
+
+  /// Initializes a projection for this qubit and returns it, or returns the
+  /// currently-active projection for the qubit if one already exists.
+  pub fn activate_projection(&mut self, qb: &Qubit) -> Ptr<QuantumProjection> {
+    if let Some(proj) = self.projections.get(&qb.index) {
+      return proj.clone();
+    }
+
+    // In general running a single projection covers all current qubits, so we
+    // steal the one that's currently active if it's there.
+    let projection = if self.projections.is_empty() {
+      Ptr::from(QuantumProjection::with_tracer(
+        self.associated_runtime.engines.borrow(),
+        self.associated_runtime.trace_module.borrow())
+      )
+    } else {
+      self.projections.values().next().unwrap().clone()
+    };
+
+    self.projections.insert(qb.index, projection.clone());
+    projection
+  }
+
+  pub fn deactivate_projection(&mut self, qb: &Qubit) {
+    self.projections.remove(&qb.index);
+  }
+}
diff --git a/src/munchkin/pykin/src/smart_pointers.rs b/src/munchkin/pykin/src/smart_pointers.rs
new file mode 100644
index 0000000..de49df9
--- /dev/null
+++ b/src/munchkin/pykin/src/smart_pointers.rs
@@ -0,0 +1,512 @@
+use std::hash::{Hash, Hasher};
+use std::ops::{Deref, DerefMut};
+use std::borrow::{Borrow};
+use std::cell::{Cell};
+use std::fmt::{Display, Formatter};
+
+pub type Ptr<T> = FlexiPtr<T>;
+
+/// Allows an expression call to be done mutably without resorting to borrow casting. Use this
+/// when you need to override the mutability constraint as it's the safest version, ironically.
+#[macro_export]
+macro_rules! with_mutable {
+  ($val:ident.$($rest:tt)*) => {
+    unsafe {
+      (*$val.as_ptr()).$($rest)*
+    }
+  };
+}
+
+/// See [with_mutable]. Just a modification of that macro that allows the target pointer
+/// to be a field on an object (since you can't seemingly match on 'self').
+#[macro_export]
+macro_rules! with_mutable_self {
+  ($self:ident.$val:ident$($rest:tt)*) => {
+    unsafe {
+      (*$self.$val.as_ptr())$($rest)*
+    }
+  };
+}
+
+pub struct FlexiRef<T> {
+  counter: *mut Cell<usize>,
+  value: *mut Cell<T>,
+}
+
+impl<T> Drop for FlexiRef<T> {
+  fn drop(&mut self) {
+    unsafe {
+      drop(Box::from_raw(self.value));
+      drop(Box::from_raw(self.counter));
+    }
+  }
+}
+
+impl<T> FlexiRef<T> {
+  pub fn new(value: T, initial_count: usize) -> FlexiRef<T> {
+    FlexiRef {
+      value: Box::into_raw(Box::new(Cell::new(value))),
+      counter: Box::into_raw(Box::new(Cell::new(initial_count)))
+    }
+  }
+}
+
+impl<T> FlexiRef<T> {
+  pub fn inc(&self) {
+    unsafe {
+      let counter = self.counter.as_ref().unwrap();
+      counter.set(counter.get() + 1);
+    }
+  }
+
+  pub fn inc_by(&self, val: usize) {
+    unsafe {
+      let counter = self.counter.as_ref().unwrap();
+      counter.set(counter.get() + val);
+    }
+  }
+
+  pub fn dec(&mut self) {
+    unsafe {
+      let counter = self.counter.as_ref().unwrap();
+      counter.set(counter.get() - 1);
+    }
+  }
+
+  pub fn dec_by(&mut self, val: usize) {
+    unsafe {
+      let counter = self.counter.as_ref().unwrap();
+      counter.set(counter.get() - val);
+    }
+  }
+
+  pub fn ref_count(&self) -> usize {
+    unsafe { (*self.counter).get().clone() }
+  }
+
+  pub fn value(&self) -> &mut T {
+    unsafe {
+      (*self.value).get_mut()
+    }
+  }
+}
+
+/// Cloning a reference means just copying across the pointers.
+impl<T> Clone for FlexiRef<T> {
+  fn clone(&self) -> Self {
+    FlexiRef {
+      value: self.value,
+      counter: self.counter
+    }
+  }
+}
+
+impl<T> PartialEq for FlexiRef<T> {
+  fn eq(&self, other: &Self) -> bool {
+    self.counter.addr() == other.counter.addr() && self.value.addr() == other.value.addr()
+  }
+}
+
+impl<T> Eq for FlexiRef<T> {
+}
+
+impl<T: Display> Display for FlexiRef<T> {
+  fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+    f.write_str(format!(
+      "Value: [{}] '{}', count: [{}] {}",
+      self.value.addr(),
+      self.value().to_string(),
+      self.counter.addr(),
+      self.ref_count())
+      .as_str())
+  }
+}
+
+/// Reference-counted smart pointer which avoids Rust's mutation and lifetime rules.
+///
+/// Functionally this was built to act as a C++-like smart pointer, leaving lifetimes and
+/// potentially dangerous usages up to the writer, not Rust's various analyses.
+///
+/// It acts as a super-pointer, merging owned objects and borrowed references into one structure
+/// and treating them as (mostly) the same. There are some operations which cannot be performed
+/// on pointers of differing types due to the structure of the internal data.
+///
+/// Since its internals are raw pointers, Rust's lifetime rules have no clue about them, and since
+/// raw pointers are also treated specially in regard to mutation, you can take out
+/// unlimited mutable aliases if you go through the raw pointer itself.
+///
+/// Due to this constraint around mutation, we use macros that perform the operations on
+/// the raw pointers instead of returning them as borrows - which would violate Rust's rules in
+/// certain situations, multi-mutable-aliasing being one of them.
+pub enum FlexiPtr<T> {
+  None,
+  RefCounted(*mut FlexiRef<T>),
+  Borrow(*mut T)
+}
+
+// TODO: Apply appropriate thread-safety later when it's actually needed. Certain things need
+// the trait in situations where threading will never come up.
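+// A quick usage sketch: cloning shares the allocation and bumps the count, while
+// `with_mutable!` mutates through the raw pointer even from an immutable binding:
+//
+//     let shared = Ptr::from(vec![1, 2, 3]);
+//     let alias = shared.clone();      // same allocation, ref count is now 2
+//     with_mutable!(alias.push(4));    // mutate without a &mut binding
+//     assert_eq!(shared.len(), 4);     // both handles observe the change
+//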
+unsafe impl<T> Send for FlexiPtr<T> {
+}
+
+impl<T: Display> Display for FlexiPtr<T> {
+  fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+    if !FlexiPtr::is_null(self) {
+      self.deref().fmt(f)
+    } else {
+      Ok(())
+    }
+  }
+}
+
+impl<T> FlexiPtr<T> {
+  /// Effectively extends the passed-in flexi pointer into this object,
+  /// dropping its existing value and ref count.
+  ///
+  /// In-place equivalent to:
+  /// ```ignore
+  /// *self.flexi_pointer = *flexi_ref;
+  /// ```
+  pub fn expand_into(&self, val: &FlexiPtr<T>) {
+    match self {
+      FlexiPtr::RefCounted(ref_) => {
+        match val.borrow() {
+          FlexiPtr::RefCounted(other_ref) => {
+            unsafe {
+              // We don't want to merge the same pointers into each other.
+              let is_the_same = (**ref_) == (**other_ref);
+              if is_the_same {
+                return;
+              }
+
+              let old_count = (**ref_).ref_count();
+
+              // Assigning a flexi-ref to the structure overwrites the pointer values
+              // for that struct only, not every one, so we need to assign the
+              // inner pointers instead.
+              (**ref_).value = (**other_ref).value;
+              (**ref_).counter = (**other_ref).counter;
+
+              (**ref_).inc_by(old_count);
+            }
+          }
+          FlexiPtr::Borrow(_) => {
+            panic!("Can't extract from borrow-driven into ref-counted flexi-pointer.");
+          }
+          _ => {}
+        }
+      }
+      FlexiPtr::Borrow(borrow) => {
+        match val {
+          FlexiPtr::RefCounted(_) => {
+            panic!("Can't extract from ref-counted into borrow-driven flexi-pointer.");
+          }
+          FlexiPtr::Borrow(other_borrow) => {
+            // TODO: Probably doesn't work, test and fix.
+            // Reading a reference with a bitwise copy is probably fine as it's just a
+            // pointer anyway.
+            unsafe { (*borrow).write(other_borrow.read()) };
+          }
+          _ => {}
+        }
+      }
+      _ => {}
+    }
+  }
+}
+
+impl<T> FlexiPtr<T> {
+  pub fn is_null(self_: &Self) -> bool {
+    match self_ {
+      FlexiPtr::None => true,
+      _ => false
+    }
+  }
+
+  pub fn is_not_null(self_: &Self) -> bool {
+    !FlexiPtr::is_null(self_)
+  }
+
+  pub fn as_address(self_: &Self) -> usize {
+    // TODO: There's got to be a better way to get the address than... this.
+    self_.deref() as *const T as *mut T as *mut () as usize
+  }
+
+  /// Checks equality against pointers.
+  pub fn eq(self_: &Self, other: &Self) -> bool {
+    if FlexiPtr::is_null(self_) && FlexiPtr::is_null(other) {
+      true
+    } else if FlexiPtr::is_null(self_) || FlexiPtr::is_null(other) {
+      false
+    } else {
+      FlexiPtr::as_address(self_) == FlexiPtr::as_address(other)
+    }
+  }
+
+  /// Returns the internal value as a pointer. Panics if it's None.
+  pub fn as_ptr(&self) -> *mut T {
+    match self {
+      FlexiPtr::RefCounted(val) => unsafe { (*(**val).value).as_ptr() },
+      FlexiPtr::Borrow(val) => *val,
+      _ => panic!("Attempted deref on null pointer.")
+    }
+  }
+
+  /// Returns count of currently live pointers in this network of flexi-pointers.
+  /// Will return None for things that don't have a ref-count.
+  fn ref_count(&self) -> Option<usize> {
+    match self {
+      FlexiPtr::RefCounted(ref_) => unsafe { Some((**ref_).ref_count()) }
+      _ => None
+    }
+  }
+
+  /// We want to be able to drop the internals outside of the usual drop
+  /// pathways, so have a separate method.
+  fn drop_internals(&mut self) {
+    unsafe {
+      // Borrows don't require a drop, neither does None; only if we're the owner of
+      // an object do we want to do anything to it.
+      match self {
+        FlexiPtr::RefCounted(ref_) => {
+          (**ref_).dec();
+          if (**ref_).ref_count() == 0 {
+            drop(Box::from_raw(*ref_));
+          }
+        },
+        _ => {}
+      }
+    }
+  }
+}
+
+impl<T: Clone> FlexiPtr<T> {
+  /// Clones the inner object and returns a new pointer from it.
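+  /// A sketch of how this differs from [Clone], which shares the allocation:
+  /// ```ignore
+  /// let first = Ptr::from(5);
+  /// let mut copy = first.clone_inner(); // fresh allocation with its own ref count
+  /// *copy = 7;
+  /// assert_eq!(*first, 5);              // the original is untouched
+  /// ```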
+  pub fn clone_inner(&self) -> FlexiPtr<T> {
+    Ptr::from(self.deref().clone())
+  }
+}
+
+impl<T> Default for FlexiPtr<T> {
+  fn default() -> Self {
+    FlexiPtr::None
+  }
+}
+
+impl<T> Drop for FlexiPtr<T> {
+  fn drop(&mut self) {
+    self.drop_internals()
+  }
+}
+
+impl<T> Deref for FlexiPtr<T> {
+  type Target = T;
+
+  fn deref(&self) -> &Self::Target {
+    match self {
+      FlexiPtr::RefCounted(val) => {
+        unsafe { (*(*val)).value() }
+      },
+      FlexiPtr::Borrow(val) => unsafe { val.as_ref().unwrap() },
+      _ => panic!("Attempted deref on null pointer.")
+    }
+  }
+}
+
+impl<T> DerefMut for FlexiPtr<T> {
+  fn deref_mut(&mut self) -> &mut Self::Target {
+    match self {
+      FlexiPtr::RefCounted(val) => {
+        unsafe { (*(*val)).value() }
+      },
+      FlexiPtr::Borrow(val) => unsafe { val.as_mut().unwrap() },
+      _ => panic!("Attempted deref on null pointer.")
+    }
+  }
+}
+
+impl<T: PartialEq> PartialEq for FlexiPtr<T> {
+  fn eq(&self, other: &Self) -> bool {
+    if FlexiPtr::is_null(self) && FlexiPtr::is_null(other) {
+      true
+    } else if FlexiPtr::is_null(self) || FlexiPtr::is_null(other) {
+      false
+    } else {
+      self.deref() == other.deref()
+    }
+  }
+}
+
+impl<T: PartialEq> Eq for FlexiPtr<T> {
+}
+
+impl<T: Hash> Hash for FlexiPtr<T> {
+  fn hash<H: Hasher>(&self, state: &mut H) {
+    self.deref().hash(state)
+  }
+}
+
+impl<T> Clone for FlexiPtr<T> {
+  fn clone(&self) -> Self {
+    match self.borrow() {
+      FlexiPtr::RefCounted(val) => {
+        unsafe {
+          (**val).inc();
+          FlexiPtr::RefCounted(val.clone())
+        }
+      },
+      FlexiPtr::Borrow(val) => FlexiPtr::Borrow(*val),
+      FlexiPtr::None => FlexiPtr::None
+    }
+  }
+}
+
+impl<T> From<&FlexiPtr<T>> for FlexiPtr<T> {
+  fn from(value: &FlexiPtr<T>) -> Self {
+    value.clone()
+  }
+}
+
+impl<T> From<T> for FlexiPtr<T> {
+  fn from(value: T) -> Self {
+    FlexiPtr::RefCounted(Box::into_raw(Box::new(FlexiRef::new(value, 1))))
+  }
+}
+
+/// Steals the reference and disassociates it from lifetime tracking. Only use
+/// this in situations where other things enforce that the reference won't be dropped before
+/// this falls out of use.
+impl<T> From<&T> for FlexiPtr<T> {
+  fn from(value: &T) -> Self {
+    FlexiPtr::Borrow(value as *const T as *mut T)
+  }
+}
+
+/// Disassociates reference from lifetime tracking. See immutable declaration for more details.
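+/// A sketch of the caller's obligation under this constructor:
+/// ```ignore
+/// let mut owned = String::from("q0");
+/// let ptr = FlexiPtr::from(&mut owned);
+/// // `owned` must outlive `ptr`; nothing here checks that for you.
+/// ```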
+impl<T> From<&mut T> for FlexiPtr<T> {
+  fn from(value: &mut T) -> Self {
+    FlexiPtr::Borrow(value)
+  }
+}
+
+#[cfg(test)]
+mod tests {
+  use std::borrow::{Borrow};
+  use std::{assert_eq};
+  use std::fmt::{Display, Formatter};
+  use std::ops::Add;
+  use crate::smart_pointers::{FlexiPtr};
+
+  struct Recursive {
+    nested_flexi: FlexiPtr<Recursive>,
+    value: String
+  }
+
+  impl Display for Recursive {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+      f.write_str(self.value.as_str())
+    }
+  }
+
+  #[test]
+  fn nested_smart_pointers() {
+    unsafe {
+      let mut starter = FlexiPtr::from(
+        Recursive {
+          nested_flexi: FlexiPtr::default(),
+          value: String::from("Hello, can I have some cake?")
+        });
+
+      starter.nested_flexi = starter.clone();
+
+      unsafe fn nested_calls(arg: &FlexiPtr<Recursive>) {
+        let mut new_pointer = arg.clone();
+        unsafe fn further_nested_calls(arg: &FlexiPtr<Recursive>) {
+          let mut another_pointer = arg.clone();
+          assert_eq!(another_pointer.ref_count().expect("Has to be active."), 4);
+          assert_eq!(FlexiPtr::as_address(arg), FlexiPtr::as_address(&another_pointer));
+          another_pointer.value = String::from("I am no longer asking.");
+        }
+        further_nested_calls(new_pointer.borrow());
+        assert_eq!(new_pointer.value, String::from("I am no longer asking."));
+        new_pointer.value = String::from("That cake now belongs to me.");
+        assert_eq!(new_pointer.ref_count().expect("Has to be active."), 3);
+      }
+      nested_calls(starter.borrow());
+      assert_eq!(starter.value, String::from("That cake now belongs to me."));
+      assert_eq!(starter.ref_count().expect("Has to be active."), 2);
+    }
+  }
+
+  #[test]
+  fn replace_test() {
+    let mut starter = FlexiPtr::from(5);
+    let second = starter.clone();
+    let third = second.clone();
+    assert_eq!(third.ref_count().expect("Exists."), 3);
+
+    let replacement = FlexiPtr::from(10);
+    starter.expand_into(replacement.borrow());
+
+    assert_eq!(starter.ref_count().expect("Exists."), 4);
+    assert_eq!(*starter, *replacement);
+    assert_eq!(*starter, *second);
+    assert_eq!(*second, *third);
+  }
+
+  #[test]
+  fn recursive_replace() {
+    let mut starter = FlexiPtr::from(Recursive { nested_flexi: Default::default(), value: "Dave".to_string() });
+    let second = FlexiPtr::from(Recursive { nested_flexi: Default::default(), value: "Dave".to_string() });
+
+    let first_rc = starter.ref_count().unwrap();
+    starter.expand_into(&second);
+    let second_rc = starter.ref_count().unwrap();
+
+    for _ in 0..40 {
+      starter.expand_into(&second);
+    }
+
+    let final_rc = starter.ref_count().unwrap();
+
+    assert_eq!(final_rc, 2);
+  }
+
+  #[test]
+  fn complicated_expansion() {
+    let starter = FlexiPtr::from(Recursive { nested_flexi: Default::default(), value: "Dave".to_string() });
+    let mut slist = Vec::new();
+
+    // Copy to make sure clone propagates pointers.
+    for _ in 0..40 {
+      slist.push(starter.clone());
+    }
+
+    let second = FlexiPtr::from(Recursive { nested_flexi: Default::default(), value: "Dave the second".to_string() });
+    let mut dlist = Vec::new();
+
+    for _ in 0..40 {
+      dlist.push(second.clone());
+    }
+
+    // Only replace half of our objects. Since everything is linked, they should all
+    // change. This enforces that this link isn't broken.
+    for i in 0..20 {
+      slist.get_mut(i).unwrap().expand_into(&dlist.get_mut(i).unwrap());
+    }
+
+    // Iterate through everything; they should now all be the same value and ref count.
+ for val in slist.iter() { + assert_eq!(val.value, second.value); + assert_eq!(val.ref_count().expect("Should be a reference"), second.ref_count().expect("Should be a reference")); + } + + for val in dlist.iter() { + assert_eq!(val.value, second.value); + assert_eq!(val.ref_count().expect("Should be a reference"), second.ref_count().expect("Should be a reference")); + } + + assert_eq!(starter.value, "Dave the second".to_string()); + assert_eq!(starter.ref_count().unwrap(), 82); + } +} \ No newline at end of file diff --git a/src/munchkin/rust-toolchain.toml b/src/munchkin/rust-toolchain.toml new file mode 100644 index 0000000..271800c --- /dev/null +++ b/src/munchkin/rust-toolchain.toml @@ -0,0 +1,2 @@ +[toolchain] +channel = "nightly" \ No newline at end of file diff --git a/src/munchkin/scripts/build.ps1 b/src/munchkin/scripts/build.ps1 new file mode 100644 index 0000000..112f9ca --- /dev/null +++ b/src/munchkin/scripts/build.ps1 @@ -0,0 +1,57 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. + +#Requires -PSEdition Core + +<# + .SYNOPSIS + Build: Bootstraps psake and invokes the build. +#> +[cmdletbinding()] +param( + [Parameter(Position = 0, Mandatory = 0)] + [string]$buildFile = "$(Join-Path $PSScriptRoot psakefile.ps1)", + [Parameter(Position = 1, Mandatory = 0)] + [string[]]$taskList = @(), + [Parameter(Position = 2, Mandatory = 0)] + [switch]$docs = $false, + [Parameter(Position = 3, Mandatory = 0)] + [System.Collections.Hashtable]$parameters = @{}, + [Parameter(Position = 4, Mandatory = 0)] + [System.Collections.Hashtable]$properties = @{}, + [Parameter(Position = 5, Mandatory = $false)] + [switch]$detailedDocs = $false +) + +# PS 7.3 introduced exec alias which breaks the build. +Remove-Item alias:exec -ErrorAction SilentlyContinue + +if ($null -eq (Import-Module -Name psake -PassThru -ErrorAction SilentlyContinue)) { + Install-Module -Name Psake -Scope CurrentUser -Repository PSGallery -Force -Verbose +} + +$scriptPath = $(Split-Path -Path $MyInvocation.MyCommand.path -Parent) + +# '[p]sake' is the same as 'psake' but $Error is not polluted +Remove-Module -Name [p]sake -Verbose:$false +Import-Module -Name psake -Verbose:$false +if ($help) { + Get-Help -Name Invoke-psake -Full + return +} + +if ($buildFile -and (-not (Test-Path -Path $buildFile))) { + $absoluteBuildFile = (Join-Path -Path $scriptPath -ChildPath $buildFile) + if (Test-path -Path $absoluteBuildFile) { + $buildFile = $absoluteBuildFile + } +} + +$nologo = $true +$framework = $null +$initialization = {} +Invoke-psake $buildFile $taskList $framework $docs $parameters $properties $initialization $nologo $detailedDocs $notr + +if (!$psake.build_success) { + exit 1 +} diff --git a/src/munchkin/scripts/psakefile.ps1 b/src/munchkin/scripts/psakefile.ps1 new file mode 100644 index 0000000..31d234e --- /dev/null +++ b/src/munchkin/scripts/psakefile.ps1 @@ -0,0 +1,281 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
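+
+# The tasks below are normally driven through the psake bootstrap next to this
+# file, e.g. `pwsh build.ps1 -t build` or `pwsh build.ps1 -t checks`.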
+ +Include utils.ps1 + +Properties { + $Root = Resolve-Path (Split-Path -Parent $PSScriptRoot) + $BuildLlvm = Join-Path $Root build-llvm + $Pykin = Join-Path $Root pykin + $Examples = Join-Path $Root examples + $Target = Join-Path $Root target + $Wheels = Join-Path $Target wheels + $CargoConfigToml = Join-Path $Root .cargo config.toml + $VscodeSettingsJson = Join-Path $Root .vscode settings.json + $DocsRoot = Join-Path $Root docs + $DocsBuild = Join-Path $DocsRoot _build + $RustVersion = "1.64.0" + $ManylinuxTag = "manylinux2014_x86_64_maturin" + $ManylinuxRoot = "/io" + $Python = Resolve-Python +} + +task default -depends build +task build -depends build-llvm, pykin +task checks -depends cargo-fmt, cargo-clippy, black, mypy +task manylinux -depends build-manylinux-container-image, run-manylinux-container-image + +task run-manylinux-container-image -preaction { Write-CacheStats } -postaction { Write-CacheStats } { + # For any of the volumes mapped, if the dir doesn't exist, + # docker will create it and it will be owned by root and + # the caching/install breaks with permission errors. + # New-Item is idempotent so we don't need to check for existence + $cacheMount, $cacheEnv = Get-CCacheParams + Write-BuildLog "Running container image: $ManylinuxTag" + $ioVolume = "${Root}:$ManylinuxRoot" + $userName = Get-LinuxContainerUserName + + Invoke-LoggedCommand { + docker run --rm ` + --user $userName ` + --volume $ioVolume @cacheMount @cacheEnv ` + --env MK_CACHE_DIR=/tmp/llvm ` + --workdir $ManylinuxRoot ` + $ManylinuxTag ` + conda run --no-capture-output pwsh build.ps1 -t default + } +} + +task cargo-fmt { + Invoke-LoggedCommand -workingDirectory $Root -errorMessage "Please run 'cargo fmt --all' before pushing" { + cargo fmt --all -- --check + } +} + +task cargo-clippy -depends init { + Invoke-LoggedCommand -workingDirectory $Root -errorMessage "Please fix the above clippy errors" { + cargo clippy --workspace --all-targets @(Get-CargoArgs) -- -D warnings + } +} + +task black -depends check-environment { + exec { pip install black } + Invoke-LoggedCommand -workingDirectory $Root -errorMessage "Please run black before pushing" { + black --check --extend-exclude "^/examples/mock_language/" . + } +} + +task mypy -depends check-environment { + $reqs = Resolve-PythonRequirements "$Pykin[test]" + exec { pip install --requirement (Join-Path $Examples requirements.txt) @reqs mypy } + Invoke-LoggedCommand -workingDirectory $Root -errorMessage "Please fix the above mypy errors" { + mypy + } +} + +task build-llvm -depends init { + Invoke-LoggedCommand -workingDirectory $BuildLlvm { cargo test --release @(Get-CargoArgs) } + Invoke-LoggedCommand -workingDirectory $BuildLlvm { cargo build --release @(Get-CargoArgs) } +} + +task pykin -depends init { + $env:MATURIN_PEP517_ARGS = (Get-CargoArgs) -Join " " + Get-Wheels pykin | Remove-Item -Verbose + Invoke-LoggedCommand { pip --verbose wheel --no-deps --wheel-dir $Wheels $Pykin } + + if (Test-CommandExists auditwheel) { + $unauditedWheels = Get-Wheels pykin + Invoke-LoggedCommand { auditwheel repair --wheel-dir $Wheels $unauditedWheels } + $unauditedWheels | Remove-Item + } + + $packages = Get-Wheels pykin + Invoke-LoggedCommand -workingDirectory $Root { + pip install --force-reinstall --no-deps $packages + } + + Invoke-LoggedCommand -workingDirectory $Root { pytest . 
}
+}
+
+task wheelhouse -precondition { -not (Test-Path (Join-Path $Wheels *.whl)) } {
+    Invoke-Task build
+}
+
+task docs -depends check-environment, wheelhouse {
+    Invoke-LoggedCommand {
+        pip install --requirement (Join-Path $DocsRoot requirements.txt) (Join-Path $Wheels *.whl)
+    }
+    Invoke-LoggedCommand { sphinx-build -M html $DocsRoot $DocsBuild -W --keep-going }
+}
+
+task check-environment {
+    $pyenv = Join-Path $Root ".env"
+    if ((Test-Path -Path $pyenv) -eq $false) {
+        Write-BuildLog "No virtual environment found."
+        Write-BuildLog "Setting up virtual environment in $pyenv"
+        & $Python -m venv $pyenv
+    }
+    else {
+        Write-BuildLog "Virtual environment found."
+    }
+
+    if ($IsWindows) {
+        Write-BuildLog "In Windows"
+        . (Join-Path $pyenv Scripts Activate.ps1)
+    }
+    else {
+        Write-BuildLog "Not in Windows"
+        . (Join-Path $pyenv bin Activate.ps1)
+    }
+
+    $env_message = @(
+        "Building LLVM requires a virtualenv or conda environment to build.",
+        "Neither the VIRTUAL_ENV nor CONDA_PREFIX environment variables are set.",
+        "See https://virtualenv.pypa.io/en/latest/index.html on how to use virtualenv"
+    )
+    Assert ((Test-InVirtualEnvironment) -eq $true) ($env_message -Join ' ')
+}
+
+task init -depends check-environment {
+    # build-llvm has this logic built in when compiled on its own
+    # but we must have LLVM installed prior to the wheels being built.
+
+    # if an external LLVM is specified, make sure it exists and
+    # skip further bootstrapping
+    if (Test-Path env:\MK_LLVM_EXTERNAL_DIR) {
+        Use-ExternalLlvmInstallation
+    }
+    else {
+        $packagePath = Resolve-InstallationDirectory
+        if (Test-LlvmConfig $packagePath) {
+            Write-BuildLog "LLVM target is already installed."
+            # LLVM is already downloaded
+            Use-LlvmInstallation $packagePath
+        }
+        else {
+            Write-BuildLog "LLVM target is not installed."
+            if (Test-AllowedToDownloadLlvm) {
+                Write-BuildLog "Downloading LLVM target"
+                Invoke-Task "install-llvm-from-archive"
+            }
+            else {
+                Write-BuildLog "Downloading LLVM Disabled, building from source."
+                # We don't have an external LLVM installation specified
+                # We are not downloading LLVM
+                # So we need to build it.
+                Invoke-Task "install-llvm-from-source"
+            }
+            $installationDirectory = Resolve-InstallationDirectory
+            Use-LlvmInstallation $installationDirectory
+        }
+    }
+}
+
+task install-llvm-from-archive {
+    install-llvm $BuildLlvm download (Get-LLVMFeatureVersion)
+    $installationDirectory = Resolve-InstallationDirectory
+    Assert (Test-LlvmConfig $installationDirectory) "install-llvm-from-archive failed to install a usable LLVM installation"
+}
+
+task install-llvm-from-source -depends configure-sccache -postaction { Write-CacheStats } {
+    if ($IsWindows) {
+        Include vcvars.ps1
+    }
+    install-llvm $BuildLlvm build (Get-LLVMFeatureVersion)
+    $installationDirectory = Resolve-InstallationDirectory
+    Assert (Test-LlvmConfig $installationDirectory) "install-llvm-from-source failed to install a usable LLVM installation"
+}
+
+task package-manylinux-llvm -depends build-manylinux-container-image -preaction { Write-CacheStats } -postaction { Write-CacheStats } {
+    # For any of the volumes mapped, if the dir doesn't exist,
+    # docker will create it and it will be owned by root and
+    # the caching/install breaks with permission errors.
+ # New-Item is idempotent so we don't need to check for existence + $cacheMount, $cacheEnv = Get-CCacheParams + Write-BuildLog "Running container image: $ManylinuxTag" + $ioVolume = "${Root}:$ManylinuxRoot" + $userName = Get-LinuxContainerUserName + + Invoke-LoggedCommand { + docker run --rm ` + --user $userName ` + --volume $ioVolume @cacheMount @cacheEnv ` + --workdir $ManylinuxRoot ` + --env MK_PKG_DEST=$ManylinuxRoot/target/manylinux ` + $ManylinuxTag ` + conda run --no-capture-output pwsh build.ps1 -t package-llvm + } +} + +task package-llvm { + if ($IsWindows) { + Include vcvars.ps1 + } + $clear_pkg_dest_var = $false + if (!(Test-Path env:\MK_PKG_DEST)) { + $clear_pkg_dest_var = $true + $env:MK_PKG_DEST = $Target + } + New-Item $env:MK_PKG_DEST -ItemType Directory -Force + try { + Invoke-LoggedCommand -workingDirectory $BuildLlvm { + cargo build --release --no-default-features --features "package-llvm,$(Get-LLVMFeatureVersion)-no-llvm-linking" -vv + } + } + finally { + if ($clear_pkg_dest_var) { + Remove-Item -Path Env:MK_PKG_DEST + } + } +} + +task build-manylinux-container-image { + Write-BuildLog "Building container image manylinux-llvm-builder" + Invoke-LoggedCommand -workingDirectory (Join-Path $Root eng) { + $user = Get-LinuxContainerUserName + $uid = Get-LinuxContainerUserId + $gid = Get-LinuxContainerGroupId + Get-Content Dockerfile.manylinux | docker build ` + --build-arg USERNAME=$user ` + --build-arg USER_UID=$uid ` + --build-arg USER_GID=$gid ` + --build-arg RUST_VERSION=$RustVersion ` + --tag $ManylinuxTag ` + - + } +} + +task check-licenses { + # Uses cargo-deny to verify that the linked components + # only use approved licenses + # https://github.com/EmbarkStudios/cargo-deny + Invoke-LoggedCommand -wd $repo.root { + cargo deny check licenses + } +} + +task update-noticefiles { + # use cargo-about to generate a notice files + # notice files are only for wheel distributions + # as no bundled sources are in the sdist. + + # llvm special license is already in the template + # as it is a hidden transitive dependency. + # https://github.com/EmbarkStudios/cargo-about + $config = Join-Path $Root notice.toml + $template = Join-Path $Root notice.hbs + $notice = Join-Path $Pykin NOTICE-WHEEL.txt + Invoke-LoggedCommand -workingDirectory $Pykin { + cargo about generate --config $config --all-features --output-file $notice $template + $contents = Get-Content -Raw $notice + [System.Web.HttpUtility]::HtmlDecode($contents) | Out-File $notice + } +} + +task configure-sccache -postaction { Write-CacheStats } { + if (Test-CommandExists sccache) { + Write-BuildLog "Starting sccache server" + & { sccache --start-server } -ErrorAction SilentlyContinue + Write-BuildLog "Started sccache server" + } +} diff --git a/src/munchkin/scripts/utils.ps1 b/src/munchkin/scripts/utils.ps1 new file mode 100644 index 0000000..ae7bdf6 --- /dev/null +++ b/src/munchkin/scripts/utils.ps1 @@ -0,0 +1,348 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
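+
+# Shared helpers for psakefile.ps1, pulled in via `Include utils.ps1`. Several of
+# them honour MK_* environment variables; for example, the LLVM feature version
+# (see Get-LLVMFeatureVersion below) can be pinned before a build:
+#   $env:MK_LLVM_FEATURE_VERSION = "llvm13-0"   # defaults to llvm14-0 when unset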
+
+Properties {
+    $llvm_releases_url = "https://github.com/llvm/llvm-project/releases"
+    $feature2releaseprefix = @{ "llvm11-0" = "/download/llvmorg-11.0.0/clang+llvm-11.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz";
+        "llvm12-0" = "/download/llvmorg-12.0.0/clang+llvm-12.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz";
+        "llvm13-0" = "/download/llvmorg-13.0.0/clang+llvm-13.0.0-x86_64-linux-gnu-ubuntu-20.04.tar.xz";
+        "llvm14-0" = "/download/llvmorg-14.0.0/clang+llvm-14.0.0-x86_64-linux-gnu-ubuntu-18.04.tar.xz"
+    }
+}
+
+if (!(Test-Path function:\Get-RepoRoot)) {
+    # pin working directory to this repo in case
+    # we are ever in a submodule
+    function Get-RepoRoot {
+        exec -workingDirectory $PSScriptRoot {
+            git rev-parse --show-toplevel
+        }
+    }
+}
+
+# Fix temp path for non-windows platforms if missing
+if (!(Test-Path env:\TEMP)) {
+    $env:TEMP = [System.IO.Path]::GetTempPath()
+}
+
+####
+# Utilities
+####
+
+# Writes an Azure DevOps message with default debug severity
+function Write-BuildLog {
+    param (
+        [Parameter(Mandatory = $true)]
+        [string]$message,
+        [Parameter(Mandatory = $false)]
+        [ValidateSet("group", "warning", "error", "section", "debug", "command", "endgroup")]
+        [string]$severity = "debug"
+    )
+    Write-Host "##[$severity]$message"
+}
+
+# Returns true if a command with the specified name exists.
+function Test-CommandExists($name) {
+    $null -ne (Get-Command $name -ErrorAction SilentlyContinue)
+}
+
+# Returns true if the current environment is a dev container.
+function Test-InDevContainer {
+    $IsLinux -and (Test-Path env:\IN_DEV_CONTAINER)
+}
+
+
+# Sets the LLVM path in the env section of the .cargo/config.toml
+# Configures vscode rust analyzer to the correct features
+function Use-LlvmInstallation {
+    param (
+        [string]$path
+    )
+    Write-BuildLog "Setting LLVM installation to: $path"
+
+    $llvm_config_options = @(Get-Command (Join-Path $path "bin" "llvm-config*"))
+    Assert ($llvm_config_options.Length -gt 0) "llvm config not found in $path"
+
+    $llvm_config = $llvm_config_options[0].Source
+    Write-BuildLog "Found llvm-config : $llvm_config"
+
+    $version = [Version]::Parse("$(&$llvm_config --version)")
+    $prefix = "LLVM_SYS_$($version.Major)0_PREFIX"
+
+    Write-BuildLog "Setting $prefix to: $path"
+
+    if ($IsWindows) {
+        # we have to escape '\'
+        $path = $path.Replace('\', '\\')
+    }
+
+    # Create the workspace config.toml and set the LLVM_SYS env var
+    New-Item -ItemType File -Path $CargoConfigToml -Force
+    Add-Content -Path $CargoConfigToml -Value "[env]"
+    Add-Content -Path $CargoConfigToml -Value "$($prefix) = `"$($path)`""
+
+    # Add llvm feature version for rust-analyzer extension
+    $vscode_settings = @{}
+    if (!(Test-Path $VscodeSettingsJson)) {
+        New-Item -ItemType File -Path $VscodeSettingsJson -Force
+    }
+    else {
+        $vscode_settings = Get-Content $VscodeSettingsJson | ConvertFrom-Json -AsHashtable
+    }
+
+    $vscode_settings."rust-analyzer.cargo.features" = @("$(Get-LLVMFeatureVersion)")
+    $vscode_settings | ConvertTo-Json | Set-Content -Path $VscodeSettingsJson
+}
+
+function Test-LlvmConfig {
+    param (
+        [string]$path
+    )
+
+    $llvm_config_options = @(Get-Command (Join-Path $path "bin" "llvm-config*"))
+    if ($llvm_config_options.Length -eq 0) {
+        return $false
+    }
+    $llvm_config = $llvm_config_options[0].Source
+    try {
+        exec {
+            & $llvm_config --version | Out-Null
+        }
+    }
+    catch {
+        return $false
+    }
+    return $true
+}
+
+function Resolve-InstallationDirectory {
+    if (Test-Path env:\MK_LLVM_EXTERNAL_DIR) {
+        return $env:MK_LLVM_EXTERNAL_DIR
+    }
+    else {
+        $packagePath
= Get-DefaultInstallDirectory + return $packagePath + } +} + +function Get-DefaultInstallDirectory { + if (Test-Path env:\MK_CACHE_DIR) { + $env:MK_CACHE_DIR + } + else { + Join-Path $Target (Get-LLVMFeatureVersion) + } +} + +# Executes the supplied script block using psake's exec +# Warning: Do not use this command on anything that contains +# sensitive information! +function Invoke-LoggedCommand { + [CmdletBinding()] + param( + [Parameter(Mandatory = $true)] + [scriptblock]$cmd, + + [string]$errorMessage = $null, + + [int]$maxRetries = 0, + + [string]$retryTriggerErrorPattern = $null, + + [Alias("wd")] + [string]$workingDirectory = $null + ) + + Write-BuildLog "Invoke-LoggedCommand in $workingDirectory`:" + Write-BuildLog $ExecutionContext.InvokeCommand.ExpandString($cmd).Trim() "command" + + # errorMessage pulls default values from psake. We + # only want to pass the param if we want to override. + # all other parameters have safe defaults. + $extraArgs = $errorMessage ? @{ "errorMessage" = $errorMessage } : @{}; + exec $cmd @extraArgs ` + -maxRetries $maxRetries ` + -retryTriggerErrorPattern $retryTriggerErrorPattern ` + -workingDirectory $workingDirectory +} + +function Use-ExternalLlvmInstallation { + Write-BuildLog "Using LLVM installation specified by MK_LLVM_EXTERNAL_DIR" + Assert (Test-Path $env:MK_LLVM_EXTERNAL_DIR) "MK_LLVM_EXTERNAL_DIR folder does not exist" + Use-LlvmInstallation $env:MK_LLVM_EXTERNAL_DIR +} + +function Test-AllowedToDownloadLlvm { + # If MK_DOWNLOAD_LLVM isn't set, we don't allow for download + # If it is set, then we use its value + ((Test-Path env:\MK_DOWNLOAD_LLVM) -and ($env:MK_DOWNLOAD_LLVM -eq $true)) +} + +function Test-InCondaEnvironment { + (Test-Path env:\CONDA_PREFIX) +} + +function Test-InVenvEnvironment { + (Test-Path env:\VIRTUAL_ENV) +} + +function Test-InVirtualEnvironment { + (Test-InCondaEnvironment) -or (Test-InVenvEnvironment) +} + +function Get-LinuxTargetTriple { + $triple = rustc -vV | sed -n 's|host: ||p' + $triple +} + +function Get-LinuxContainerUserId { + if (Test-Path env:\MK_CONTAINER_USERID) { + $env:MK_CONTAINER_USERID + } + else { + id -u + } +} + +function Get-LinuxContainerGroupId { + if (Test-Path env:\MK_CONTAINER_GROUPID) { + $env:MK_CONTAINER_GROUPID + } + else { + id -g + } +} + +function Get-LinuxContainerUserName { + if (Test-Path env:\MK_CONTAINER_USERNAME) { + $env:MK_CONTAINER_USERNAME + } + else { + [Environment]::UserName + } +} + +function Write-CacheStats { + if (Test-CommandExists ccache) { + Write-BuildLog "ccache config:" + & { ccache --show-config } -ErrorAction SilentlyContinue + Write-BuildLog "ccache stats:" + & { ccache --show-stats } -ErrorAction SilentlyContinue + } + if (Test-CommandExists sccache) { + Write-BuildLog "sccache config/stats:" + & { sccache --show-stats } -ErrorAction SilentlyContinue + } +} + +function Get-LLVMFeatureVersion { + if (Test-Path env:\MK_LLVM_FEATURE_VERSION) { + $env:MK_LLVM_FEATURE_VERSION + } + else { + # "llvm11-0", "llvm12-0", "llvm13-0", "llvm14-0" + "llvm14-0" + } +} + +function Get-CargoArgs { + @("-vv") +} + +function Get-Wheels([string] $project) { + $name = $project.Replace('-', '_') + $pattern = Join-Path $Wheels $name-*.whl + Get-Item -ErrorAction Ignore $pattern +} + +function Get-Wheel([string] $project) { + $wheels = @(Get-Wheels $project) + Assert ($wheels.Length -gt 0) "Missing wheels for $project." + Assert ($wheels.Length -le 1) "Multiple wheels for $project ($wheels). Clean the wheels directory." 
+ $wheels[0] +} + +function Resolve-Python() { + $hasPython = $null -ne (Get-Command python -ErrorAction Ignore) + if ($hasPython -and ((python --version) -Match "Python 3.*")) { + Write-BuildLog "Python" + "python" + } + else { + Write-BuildLog "Python 3" + "python3" + } +} + +function Resolve-PythonRequirements([string[]] $projects) { + $report = pip --quiet install --dry-run --ignore-installed --report - @projects | ConvertFrom-Json + $report.install.metadata ` + | Where-Object { !$_.name.StartsWith("pykin") } ` + | ForEach-Object { "$($_.name)==$($_.version)" } +} + +function install-llvm { + Param( + [Parameter(Mandatory)] + [string]$buildllvmDir, # root directory of the Rust `build-llvm` module + [Parameter(Mandatory)] + [ValidateSet("download", "build")] + [string]$operation, + [Parameter(Mandatory)] + [ValidateSet("llvm11-0", "llvm12-0", "llvm13-0", "llvm14-0")] + [string]$feature + ) + + $llvm_release = "$llvm_releases_url/$($feature2releaseprefix[$feature])" + $llvm_release_file = $llvm_release.split('/')[-1] + + $installationDirectory = Resolve-InstallationDirectory + Write-BuildLog "installationDirectory: $installationDirectory" + New-Item -ItemType Directory -Force $installationDirectory | Out-Null + if (($operation -eq "download")) { + if (!(Test-Path -Path "$installationDirectory/$llvm_release_file" -PathType Leaf)) { + Invoke-WebRequest -Uri "$llvm_release" -OutFile "$installationDirectory/$llvm_release_file" + } + else { + Write-BuildLog "Already downloaded pre-built LLVM binaries" + } + if (!(Test-Path -Path "$installationDirectory/bin" -PathType Leaf)) { + Write-BuildLog "Extracting LLVM binaries under $installationDirectory" + tar -xvf "$installationDirectory/$llvm_release_file" -C $installationDirectory --strip-components=1 + } else { + Write-BuildLog "Already extracted LLVM binaries" + } + } + elseif (($operation -eq "build")) { + Invoke-LoggedCommand -wd $buildllvmDir { + cargo build --release --no-default-features --features "$operation-llvm,$feature-no-llvm-linking" -vv + } + } +} + +function Get-CCacheParams { + # only ccache is supported in the container for now. + # we would need a way to specify which cache is used to + # support both. + if (Test-CommandExists ccache) { + # we need to map the local cache dir into the + # container. If the env var isn't set, ask ccache + $cacheDir = "" + if (Test-Path env:\CCACHE_DIR) { + $cacheDir = $Env:CCACHE_DIR + } + else { + $cacheDir = exec { ccache -k cache_dir } + } + if (![string]::IsNullOrWhiteSpace($cacheDir)) { + New-Item -ItemType Directory -Force $cacheDir | Out-Null + + $cacheDir = Resolve-Path $cacheDir + # mount the cache outside of any runner mappings + $cacheMount = @("-v", "${cacheDir}:/ccache") + $cacheEnv = @("-e", "CCACHE_DIR=`"/ccache`"") + return $cacheMount, $cacheEnv + } + } + return "", "" +} diff --git a/src/munchkin/scripts/vcvars.ps1 b/src/munchkin/scripts/vcvars.ps1 new file mode 100644 index 0000000..8b926fc --- /dev/null +++ b/src/munchkin/scripts/vcvars.ps1 @@ -0,0 +1,20 @@ +# Copyright (c) Microsoft Corporation. +# Licensed under the MIT License. 
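+
+# Imports the MSVC build environment (vcvars64.bat) into this PowerShell session
+# so that subsequent cargo/LLVM builds can locate the native Windows toolchain.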
+ +if ($IsWindows) { + # find VS root + $vswhere = "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" + $visualStudioPath = & $vswhere -prerelease -latest -property installationPath + Write-Output "vs located at: $visualStudioPath" + + # Call vcvars64.bat and write the set calls to file + cmd.exe /c "call `"$visualStudioPath\VC\Auxiliary\Build\vcvars64.bat`" && set > %temp%\vcvars.txt" + + # Read the set calls and set the corresponding pwsh env vars + Get-Content "$Env:temp\vcvars.txt" | Foreach-Object { + if ($_ -match "^(.*?)=(.*)$") { + Set-Content "env:\$($matches[1])" $matches[2] + Write-Host "setting env: $($matches[1]) = $($matches[2])" + } + } +} \ No newline at end of file diff --git a/src/munchkin/tests/__init__.py b/src/munchkin/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/munchkin/tests/file_utils.py b/src/munchkin/tests/file_utils.py new file mode 100644 index 0000000..78886c8 --- /dev/null +++ b/src/munchkin/tests/file_utils.py @@ -0,0 +1,8 @@ +from os.path import abspath, join, dirname + +def get_qir_path(file_path): + return abspath(join(dirname(__file__), "files", "qir", file_path)) + +def get_qir(file_path): + with open(get_qir_path(file_path)) as ifile: + return ifile.read() diff --git a/src/munchkin/tests/files/qir/base_profile_ops.ll b/src/munchkin/tests/files/qir/base_profile_ops.ll new file mode 100644 index 0000000..4d88bdc --- /dev/null +++ b/src/munchkin/tests/files/qir/base_profile_ops.ll @@ -0,0 +1,74 @@ +; ModuleID = 'bell' +source_filename = "bell" + +%Qubit = type opaque +%Result = type opaque + +define void @main() #0 { +entry: + ;;; We can't process generic controlled operations or toffoli's right now. + + call void @__quantum__qis__h__body(%Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__cnot__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Qubit* inttoptr (i64 1 to %Qubit*)) + call void @__quantum__qis__mz__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Result* inttoptr (i64 0 to %Result*)) + call void @__quantum__rt__result_record_output(%Result* inttoptr (i64 1 to %Result*), i8* null) + call void @__quantum__rt__array_record_output(i64 42, i8* null) + call void @__quantum__rt__tuple_record_output(i64 42, i8* null) + call void @__quantum__rt__initialize(i8* null) + ;;; call void @__quantum__qis__ccx__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Qubit* inttoptr (i64 1 to %Qubit*), %Qubit* inttoptr (i64 2 to %Qubit*)) + ;;; call void @__quantum__qis__cz__body(%Qubit* inttoptr (i64 1 to %Qubit*), %Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__reset__body(%Qubit* inttoptr (i64 2 to %Qubit*)) + call void @__quantum__qis__rx__body(double 5.0, %Qubit* inttoptr (i64 2 to %Qubit*)) + call void @__quantum__qis__ry__body(double 5.0, %Qubit* inttoptr (i64 2 to %Qubit*)) + call void @__quantum__qis__rz__body(double 5.0, %Qubit* inttoptr (i64 2 to %Qubit*)) + call void @__quantum__qis__s__body(%Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__s_adj(%Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__t__body(%Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__t__adj(%Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__x__body(%Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__y__body(%Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__z__body(%Qubit* inttoptr (i64 0 to %Qubit*)) + ret void +} + +declare void @__quantum__qis__h__body(%Qubit*) + +declare void @__quantum__qis__cnot__body(%Qubit*, %Qubit*) + 
+declare void @__quantum__qis__mz__body(%Qubit*, %Result*) + +declare void @__quantum__rt__result_record_output(%Result*, i8*) + +declare void @__quantum__rt__array_record_output(i64, i8*) + +declare void @__quantum__rt__tuple_record_output(i64, i8*) + +declare void @__quantum__rt__initialize(i8*) + +declare void @__quantum__qis__ccx__body(%Qubit*, %Qubit*, %Qubit*) + +declare void @__quantum__qis__cz__body(%Qubit*, %Qubit*) + +declare void @__quantum__qis__reset__body(%Qubit*) + +declare void @__quantum__qis__rx__body(double, %Qubit*) + +declare void @__quantum__qis__ry__body(double, %Qubit*) + +declare void @__quantum__qis__rz__body(double, %Qubit*) + +declare void @__quantum__qis__s__body(%Qubit*) + +declare void @__quantum__qis__s_adj(%Qubit*) + +declare void @__quantum__qis__t__body(%Qubit*) + +declare void @__quantum__qis__t__adj(%Qubit*) + +declare void @__quantum__qis__x__body(%Qubit*) + +declare void @__quantum__qis__y__body(%Qubit*) + +declare void @__quantum__qis__z__body(%Qubit*) + +attributes #0 = { "EntryPoint" "requiredQubits"="2" "requiredResults"="2" } diff --git a/src/munchkin/tests/files/qir/basic_cudaq.ll b/src/munchkin/tests/files/qir/basic_cudaq.ll new file mode 100644 index 0000000..d01c5c1 --- /dev/null +++ b/src/munchkin/tests/files/qir/basic_cudaq.ll @@ -0,0 +1,39 @@ +; ModuleID = 'LLVMDialectModule' +source_filename = "LLVMDialectModule" +target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" +target triple = "x86_64-unknown-linux-gnu" + +%Qubit = type opaque +%Result = type opaque + +@cstr.72303030303000 = private constant [7 x i8] c"r00000\00" + +declare void @__quantum__qis__x__body(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__ry__body(double, %Qubit*) local_unnamed_addr + +declare void @__quantum__qis__cnot__body(%Qubit*, %Qubit*) local_unnamed_addr + +declare void @__quantum__rt__result_record_output(%Result*, i8*) local_unnamed_addr + +declare void @__quantum__qis__mz__body(%Qubit*, %Result* writeonly) local_unnamed_addr #0 + +define void @__nvqpp__mlirgen__ansatz() local_unnamed_addr #1 { + tail call void @__quantum__qis__x__body(%Qubit* null) + tail call void @__quantum__qis__ry__body(double 5.900000e-01, %Qubit* nonnull inttoptr (i64 1 to %Qubit*)) + tail call void @__quantum__qis__cnot__body(%Qubit* nonnull inttoptr (i64 1 to %Qubit*), %Qubit* null) + tail call void @__quantum__qis__mz__body(%Qubit* nonnull inttoptr (i64 1 to %Qubit*), %Result* writeonly null) + tail call void @__quantum__rt__result_record_output(%Result* null, i8* nonnull getelementptr inbounds ([7 x i8], [7 x i8]* @cstr.72303030303000, i64 0, i64 0)) + ret void +} + +attributes #0 = { "irreversible" } +attributes #1 = { "entry_point" "output_labeling_schema"="schema_id" "output_names"="[[[0,[1,\22r00000\22]]]]" "qir_profiles"="base_profile" "requiredQubits"="2" "requiredResults"="1" } + +!llvm.module.flags = !{!0, !1, !2, !3, !4} + +!0 = !{i32 2, !"Debug Info Version", i32 3} +!1 = !{i32 1, !"qir_major_version", i32 1} +!2 = !{i32 7, !"qir_minor_version", i32 0} +!3 = !{i32 1, !"dynamic_qubit_management", i1 false} +!4 = !{i32 1, !"dynamic_result_management", i1 false} \ No newline at end of file diff --git a/src/munchkin/tests/files/qir/bell_psi_minus.ll b/src/munchkin/tests/files/qir/bell_psi_minus.ll new file mode 100644 index 0000000..141b186 --- /dev/null +++ b/src/munchkin/tests/files/qir/bell_psi_minus.ll @@ -0,0 +1,29 @@ +; ModuleID = 'bell' +source_filename = "bell" + +%Qubit = type opaque +%Result = type opaque + +define 
void @main() #0 { +entry: + call void @__quantum__qis__x__body(%Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__h__body(%Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__cnot__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Qubit* inttoptr (i64 1 to %Qubit*)) + call void @__quantum__qis__mz__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Result* inttoptr (i64 0 to %Result*)) + call void @__quantum__qis__mz__body(%Qubit* inttoptr (i64 1 to %Qubit*), %Result* inttoptr (i64 1 to %Result*)) + call void @__quantum__rt__result_record_output(%Result* inttoptr (i64 0 to %Result*), i8* null) + call void @__quantum__rt__result_record_output(%Result* inttoptr (i64 1 to %Result*), i8* null) + ret void +} + +declare void @__quantum__qis__x__body(%Qubit*) + +declare void @__quantum__qis__h__body(%Qubit*) + +declare void @__quantum__qis__cnot__body(%Qubit*, %Qubit*) + +declare void @__quantum__qis__mz__body(%Qubit*, %Result*) + +declare void @__quantum__rt__result_record_output(%Result*, i8*) + +attributes #0 = { "EntryPoint" "requiredQubits"="2" "requiredResults"="2" } diff --git a/src/munchkin/tests/files/qir/bell_psi_plus.ll b/src/munchkin/tests/files/qir/bell_psi_plus.ll new file mode 100644 index 0000000..331d607 --- /dev/null +++ b/src/munchkin/tests/files/qir/bell_psi_plus.ll @@ -0,0 +1,26 @@ +; ModuleID = 'bell' +source_filename = "bell" + +%Qubit = type opaque +%Result = type opaque + +define void @main() #0 { +entry: + call void @__quantum__qis__h__body(%Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__cnot__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Qubit* inttoptr (i64 1 to %Qubit*)) + call void @__quantum__qis__mz__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Result* inttoptr (i64 0 to %Result*)) + call void @__quantum__qis__mz__body(%Qubit* inttoptr (i64 1 to %Qubit*), %Result* inttoptr (i64 1 to %Result*)) + call void @__quantum__rt__result_record_output(%Result* inttoptr (i64 0 to %Result*), i8* null) + call void @__quantum__rt__result_record_output(%Result* inttoptr (i64 1 to %Result*), i8* null) + ret void +} + +declare void @__quantum__qis__h__body(%Qubit*) + +declare void @__quantum__qis__cnot__body(%Qubit*, %Qubit*) + +declare void @__quantum__qis__mz__body(%Qubit*, %Result*) + +declare void @__quantum__rt__result_record_output(%Result*, i8*) + +attributes #0 = { "EntryPoint" "requiredQubits"="2" "requiredResults"="2" } diff --git a/src/munchkin/tests/files/qir/bell_qir_measure.bc b/src/munchkin/tests/files/qir/bell_qir_measure.bc new file mode 100644 index 0000000..e00d7ab Binary files /dev/null and b/src/munchkin/tests/files/qir/bell_qir_measure.bc differ diff --git a/src/munchkin/tests/files/qir/bell_theta_minus.ll b/src/munchkin/tests/files/qir/bell_theta_minus.ll new file mode 100644 index 0000000..e0fee11 --- /dev/null +++ b/src/munchkin/tests/files/qir/bell_theta_minus.ll @@ -0,0 +1,30 @@ +; ModuleID = 'bell' +source_filename = "bell" + +%Qubit = type opaque +%Result = type opaque + +define void @main() #0 { +entry: + call void @__quantum__qis__x__body(%Qubit* inttoptr (i64 1 to %Qubit*)) + call void @__quantum__qis__x__body(%Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__h__body(%Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__cnot__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Qubit* inttoptr (i64 1 to %Qubit*)) + call void @__quantum__qis__mz__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Result* inttoptr (i64 0 to %Result*)) + call void @__quantum__qis__mz__body(%Qubit* inttoptr (i64 1 to %Qubit*), %Result* 
inttoptr (i64 1 to %Result*)) + call void @__quantum__rt__result_record_output(%Result* inttoptr (i64 0 to %Result*), i8* null) + call void @__quantum__rt__result_record_output(%Result* inttoptr (i64 1 to %Result*), i8* null) + ret void +} + +declare void @__quantum__qis__x__body(%Qubit*) + +declare void @__quantum__qis__h__body(%Qubit*) + +declare void @__quantum__qis__cnot__body(%Qubit*, %Qubit*) + +declare void @__quantum__qis__mz__body(%Qubit*, %Result*) + +declare void @__quantum__rt__result_record_output(%Result*, i8*) + +attributes #0 = { "EntryPoint" "requiredQubits"="2" "requiredResults"="2" } diff --git a/src/munchkin/tests/files/qir/bell_theta_plus.ll b/src/munchkin/tests/files/qir/bell_theta_plus.ll new file mode 100644 index 0000000..8130025 --- /dev/null +++ b/src/munchkin/tests/files/qir/bell_theta_plus.ll @@ -0,0 +1,29 @@ +; ModuleID = 'bell' +source_filename = "bell" + +%Qubit = type opaque +%Result = type opaque + +define void @main() #0 { +entry: + call void @__quantum__qis__x__body(%Qubit* inttoptr (i64 1 to %Qubit*)) + call void @__quantum__qis__h__body(%Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__cnot__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Qubit* inttoptr (i64 1 to %Qubit*)) + call void @__quantum__qis__mz__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Result* inttoptr (i64 0 to %Result*)) + call void @__quantum__qis__mz__body(%Qubit* inttoptr (i64 1 to %Qubit*), %Result* inttoptr (i64 1 to %Result*)) + call void @__quantum__rt__result_record_output(%Result* inttoptr (i64 0 to %Result*), i8* null) + call void @__quantum__rt__result_record_output(%Result* inttoptr (i64 1 to %Result*), i8* null) + ret void +} + +declare void @__quantum__qis__x__body(%Qubit*) + +declare void @__quantum__qis__h__body(%Qubit*) + +declare void @__quantum__qis__cnot__body(%Qubit*, %Qubit*) + +declare void @__quantum__qis__mz__body(%Qubit*, %Result*) + +declare void @__quantum__rt__result_record_output(%Result*, i8*) + +attributes #0 = { "EntryPoint" "requiredQubits"="2" "requiredResults"="2" } diff --git a/src/munchkin/tests/files/qir/generator-bell.ll b/src/munchkin/tests/files/qir/generator-bell.ll new file mode 100644 index 0000000..331d607 --- /dev/null +++ b/src/munchkin/tests/files/qir/generator-bell.ll @@ -0,0 +1,26 @@ +; ModuleID = 'bell' +source_filename = "bell" + +%Qubit = type opaque +%Result = type opaque + +define void @main() #0 { +entry: + call void @__quantum__qis__h__body(%Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__cnot__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Qubit* inttoptr (i64 1 to %Qubit*)) + call void @__quantum__qis__mz__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Result* inttoptr (i64 0 to %Result*)) + call void @__quantum__qis__mz__body(%Qubit* inttoptr (i64 1 to %Qubit*), %Result* inttoptr (i64 1 to %Result*)) + call void @__quantum__rt__result_record_output(%Result* inttoptr (i64 0 to %Result*), i8* null) + call void @__quantum__rt__result_record_output(%Result* inttoptr (i64 1 to %Result*), i8* null) + ret void +} + +declare void @__quantum__qis__h__body(%Qubit*) + +declare void @__quantum__qis__cnot__body(%Qubit*, %Qubit*) + +declare void @__quantum__qis__mz__body(%Qubit*, %Result*) + +declare void @__quantum__rt__result_record_output(%Result*, i8*) + +attributes #0 = { "EntryPoint" "requiredQubits"="2" "requiredResults"="2" } diff --git a/src/munchkin/tests/files/qir/hello.bc b/src/munchkin/tests/files/qir/hello.bc new file mode 100644 index 0000000..e65adfc Binary files /dev/null and 
b/src/munchkin/tests/files/qir/hello.bc differ diff --git a/src/munchkin/tests/files/qir/needs_optimisation.ll b/src/munchkin/tests/files/qir/needs_optimisation.ll new file mode 100644 index 0000000..fef97b3 --- /dev/null +++ b/src/munchkin/tests/files/qir/needs_optimisation.ll @@ -0,0 +1,30 @@ +; ModuleID = 'needs_optimisation' +source_filename = "needs_optimisation" + +%Qubit = type opaque +%Result = type opaque + +define void @main() #0 { +entry: + call void @__quantum__qis__h__body(%Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__cnot__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Qubit* inttoptr (i64 1 to %Qubit*)) + call void @__quantum__qis__h__body(%Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__h__body(%Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__cnot__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Qubit* inttoptr (i64 1 to %Qubit*)) + call void @__quantum__qis__cnot__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Qubit* inttoptr (i64 1 to %Qubit*)) + call void @__quantum__qis__mz__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Result* inttoptr (i64 0 to %Result*)) + call void @__quantum__qis__mz__body(%Qubit* inttoptr (i64 1 to %Qubit*), %Result* inttoptr (i64 1 to %Result*)) + call void @__quantum__rt__result_record_output(%Result* inttoptr (i64 0 to %Result*), i8* null) + call void @__quantum__rt__result_record_output(%Result* inttoptr (i64 1 to %Result*), i8* null) + ret void +} + +declare void @__quantum__qis__h__body(%Qubit*) + +declare void @__quantum__qis__cnot__body(%Qubit*, %Qubit*) + +declare void @__quantum__qis__mz__body(%Qubit*, %Result*) + +declare void @__quantum__rt__result_record_output(%Result*, i8*) + +attributes #0 = { "EntryPoint" "requiredQubits"="2" "requiredResults"="2" } diff --git a/src/munchkin/tests/files/qir/needs_routing.ll b/src/munchkin/tests/files/qir/needs_routing.ll new file mode 100644 index 0000000..e42dd48 --- /dev/null +++ b/src/munchkin/tests/files/qir/needs_routing.ll @@ -0,0 +1,23 @@ +; ModuleID = 'needs_routing' +source_filename = "needs_routing" + +%Qubit = type opaque + +define void @main() #0 { +entry: + call void @__quantum__qis__cnot__body(%Qubit* inttoptr (i64 2 to %Qubit*), %Qubit* inttoptr (i64 0 to %Qubit*));ok + call void @__quantum__qis__cnot__body(%Qubit* inttoptr (i64 1 to %Qubit*), %Qubit* inttoptr (i64 3 to %Qubit*));ok + call void @__quantum__qis__cnot__body(%Qubit* inttoptr (i64 1 to %Qubit*), %Qubit* inttoptr (i64 2 to %Qubit*));wrong + call void @__quantum__qis__cnot__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Qubit* inttoptr (i64 1 to %Qubit*));ok + call void @__quantum__qis__cnot__body(%Qubit* inttoptr (i64 1 to %Qubit*), %Qubit* inttoptr (i64 0 to %Qubit*)) + call void @__quantum__qis__cnot__body(%Qubit* inttoptr (i64 3 to %Qubit*), %Qubit* inttoptr (i64 2 to %Qubit*)) + ret void +} + +declare void @__quantum__qis__cnot__body(%Qubit*, %Qubit*) + +declare void @__quantum__qis__x__body(%Qubit*) + +declare void @__quantum__qis__h__body(%Qubit*) + +attributes #0 = { "EntryPoint" "requiredQubits"="4" } diff --git a/src/munchkin/tests/files/qir/out_of_order_measure.ll b/src/munchkin/tests/files/qir/out_of_order_measure.ll new file mode 100644 index 0000000..3fec629 --- /dev/null +++ b/src/munchkin/tests/files/qir/out_of_order_measure.ll @@ -0,0 +1,26 @@ +; ModuleID = 'bell' +source_filename = "bell" + +%Qubit = type opaque +%Result = type opaque + +define void @main() #0 { +entry: + call void @__quantum__qis__h__body(%Qubit* inttoptr (i64 0 to %Qubit*)) + call void 
@__quantum__qis__cnot__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Qubit* inttoptr (i64 1 to %Qubit*)) + call void @__quantum__qis__mz__body(%Qubit* inttoptr (i64 1 to %Qubit*), %Result* inttoptr (i64 1 to %Result*)) + call void @__quantum__qis__mz__body(%Qubit* inttoptr (i64 0 to %Qubit*), %Result* inttoptr (i64 0 to %Result*)) + call void @__quantum__rt__result_record_output(%Result* inttoptr (i64 1 to %Result*), i8* null) + call void @__quantum__rt__result_record_output(%Result* inttoptr (i64 0 to %Result*), i8* null) + ret void +} + +declare void @__quantum__qis__h__body(%Qubit*) + +declare void @__quantum__qis__cnot__body(%Qubit*, %Qubit*) + +declare void @__quantum__qis__mz__body(%Qubit*, %Result*) + +declare void @__quantum__rt__result_record_output(%Result*, i8*) + +attributes #0 = { "EntryPoint" "requiredQubits"="2" "requiredResults"="2" } diff --git a/src/munchkin/tests/files/qir/select.bc b/src/munchkin/tests/files/qir/select.bc new file mode 100644 index 0000000..e19fbb3 Binary files /dev/null and b/src/munchkin/tests/files/qir/select.bc differ diff --git a/src/munchkin/tests/files/qir/teleportchain.ll b/src/munchkin/tests/files/qir/teleportchain.ll new file mode 100644 index 0000000..61f13ee --- /dev/null +++ b/src/munchkin/tests/files/qir/teleportchain.ll @@ -0,0 +1,110 @@ +; ModuleID = 'qat-link' +source_filename = "qat-link" + +%Qubit = type opaque +%Result = type opaque +%Array = type opaque +%String = type opaque + +define i8 @TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__Interop() local_unnamed_addr #0 { +entry: + tail call void @__quantum__qis__h__body(%Qubit* null) + tail call void @__quantum__qis__cnot__body(%Qubit* null, %Qubit* nonnull inttoptr (i64 1 to %Qubit*)) + tail call void @__quantum__qis__h__body(%Qubit* nonnull inttoptr (i64 2 to %Qubit*)) + tail call void @__quantum__qis__cnot__body(%Qubit* nonnull inttoptr (i64 2 to %Qubit*), %Qubit* nonnull inttoptr (i64 4 to %Qubit*)) + tail call void @__quantum__qis__h__body(%Qubit* nonnull inttoptr (i64 3 to %Qubit*)) + tail call void @__quantum__qis__cnot__body(%Qubit* nonnull inttoptr (i64 3 to %Qubit*), %Qubit* nonnull inttoptr (i64 5 to %Qubit*)) + tail call void @__quantum__qis__cnot__body(%Qubit* nonnull inttoptr (i64 1 to %Qubit*), %Qubit* nonnull inttoptr (i64 2 to %Qubit*)) + tail call void @__quantum__qis__h__body(%Qubit* nonnull inttoptr (i64 1 to %Qubit*)) + tail call void @__quantum__qis__mz__body(%Qubit* nonnull inttoptr (i64 1 to %Qubit*), %Result* null) + tail call void @__quantum__qis__reset__body(%Qubit* nonnull inttoptr (i64 1 to %Qubit*)) + %0 = tail call i1 @__quantum__qir__read_result(%Result* null) + br i1 %0, label %then0__1.i.i.i, label %continue__1.i.i.i + +then0__1.i.i.i: ; preds = %entry + tail call void @__quantum__qis__z__body(%Qubit* nonnull inttoptr (i64 4 to %Qubit*)) + br label %continue__1.i.i.i + +continue__1.i.i.i: ; preds = %then0__1.i.i.i, %entry + tail call void @__quantum__qis__mz__body(%Qubit* nonnull inttoptr (i64 2 to %Qubit*), %Result* nonnull inttoptr (i64 1 to %Result*)) + tail call void @__quantum__qis__reset__body(%Qubit* nonnull inttoptr (i64 2 to %Qubit*)) + %1 = tail call i1 @__quantum__qir__read_result(%Result* nonnull inttoptr (i64 1 to %Result*)) + br i1 %1, label %then0__2.i.i.i, label %TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2.exit.i + +then0__2.i.i.i: ; preds = %continue__1.i.i.i + tail call void @__quantum__qis__x__body(%Qubit* nonnull inttoptr (i64 4 to %Qubit*)) + br label 
%TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2.exit.i + +TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2.exit.i: ; preds = %then0__2.i.i.i, %continue__1.i.i.i + tail call void @__quantum__qis__cnot__body(%Qubit* nonnull inttoptr (i64 4 to %Qubit*), %Qubit* nonnull inttoptr (i64 3 to %Qubit*)) + tail call void @__quantum__qis__h__body(%Qubit* nonnull inttoptr (i64 4 to %Qubit*)) + tail call void @__quantum__qis__mz__body(%Qubit* nonnull inttoptr (i64 4 to %Qubit*), %Result* nonnull inttoptr (i64 2 to %Result*)) + tail call void @__quantum__qis__reset__body(%Qubit* nonnull inttoptr (i64 4 to %Qubit*)) + %2 = tail call i1 @__quantum__qir__read_result(%Result* nonnull inttoptr (i64 2 to %Result*)) + br i1 %2, label %then0__1.i.i1.i, label %continue__1.i.i2.i + +then0__1.i.i1.i: ; preds = %TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2.exit.i + tail call void @__quantum__qis__z__body(%Qubit* nonnull inttoptr (i64 5 to %Qubit*)) + br label %continue__1.i.i2.i + +continue__1.i.i2.i: ; preds = %then0__1.i.i1.i, %TeleportChain__TeleportQubitUsingPresharedEntanglement__body.2.exit.i + tail call void @__quantum__qis__mz__body(%Qubit* nonnull inttoptr (i64 3 to %Qubit*), %Result* nonnull inttoptr (i64 3 to %Result*)) + tail call void @__quantum__qis__reset__body(%Qubit* nonnull inttoptr (i64 3 to %Qubit*)) + %3 = tail call i1 @__quantum__qir__read_result(%Result* nonnull inttoptr (i64 3 to %Result*)) + br i1 %3, label %then0__2.i.i3.i, label %TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.1.exit + +then0__2.i.i3.i: ; preds = %continue__1.i.i2.i + tail call void @__quantum__qis__x__body(%Qubit* nonnull inttoptr (i64 5 to %Qubit*)) + br label %TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.1.exit + +TeleportChain__DemonstrateTeleportationUsingPresharedEntanglement__body.1.exit: ; preds = %continue__1.i.i2.i, %then0__2.i.i3.i + tail call void @__quantum__qis__mz__body(%Qubit* null, %Result* nonnull inttoptr (i64 4 to %Result*)) + tail call void @__quantum__qis__reset__body(%Qubit* null) + tail call void @__quantum__qis__mz__body(%Qubit* nonnull inttoptr (i64 5 to %Qubit*), %Result* nonnull inttoptr (i64 5 to %Result*)) + tail call void @__quantum__qis__reset__body(%Qubit* nonnull inttoptr (i64 5 to %Qubit*)) + %4 = tail call i1 @__quantum__rt__result_equal(%Result* nonnull inttoptr (i64 4 to %Result*), %Result* nonnull inttoptr (i64 5 to %Result*)) + %5 = sext i1 %4 to i8 + ret i8 %5 +} + +declare %Qubit* @__quantum__rt__qubit_allocate() local_unnamed_addr + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) local_unnamed_addr + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) local_unnamed_addr + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) local_unnamed_addr + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) local_unnamed_addr + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) local_unnamed_addr + +declare void @__quantum__rt__qubit_release(%Qubit*) local_unnamed_addr + +declare void @__quantum__rt__qubit_release_array(%Array*) local_unnamed_addr + +declare %Result* @__quantum__qis__m__body(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__reset__body(%Qubit*) local_unnamed_addr + +declare %Result* @__quantum__rt__result_get_one() local_unnamed_addr + +declare void @__quantum__qis__x__body(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__z__body(%Qubit*) local_unnamed_addr + +declare void 
@__quantum__qis__h__body(%Qubit*) local_unnamed_addr + +declare void @__quantum__qis__cnot__body(%Qubit*, %Qubit*) local_unnamed_addr + +declare %String* @__quantum__rt__bool_to_string(i1) local_unnamed_addr + +declare void @__quantum__rt__message(%String*) local_unnamed_addr + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) local_unnamed_addr + +declare void @__quantum__qis__mz__body(%Qubit*, %Result* writeonly) + +declare i1 @__quantum__qir__read_result(%Result*) + +attributes #0 = { "EntryPoint" "InteropFriendly" "requiredQubits"="6" } diff --git a/src/munchkin/tests/qsharp/VQE/Program.qs b/src/munchkin/tests/qsharp/VQE/Program.qs new file mode 100644 index 0000000..a272451 --- /dev/null +++ b/src/munchkin/tests/qsharp/VQE/Program.qs @@ -0,0 +1,28 @@ +namespace Microsoft.Quantum.Chemistry.VQE { + +open Microsoft.Quantum.Core; +open Microsoft.Quantum.Chemistry; +open Microsoft.Quantum.Chemistry.JordanWigner; +open Microsoft.Quantum.Chemistry.JordanWigner.VQE; +open Microsoft.Quantum.Intrinsic; + + operation GetEnergyVQE (JWEncodedData: JordanWignerEncodingData, theta1: Double, theta2: Double, theta3: Double, nSamples: Int) : Double { + let (nSpinOrbitals, fermionTermData, inputState, energyOffset) = JWEncodedData!; + let (stateType, JWInputStates) = inputState; + let inputStateParam = ( + stateType, + [ + JordanWignerInputState((theta1, 0.0), [2, 0]), // singly-excited state + JordanWignerInputState((theta2, 0.0), [3, 1]), // singly-excited state + JordanWignerInputState((theta3, 0.0), [2, 3, 1, 0]), // doubly-excited state + JWInputStates[0] // Hartree-Fock state from Broombridge file + ] + ); + let JWEncodedDataParam = JordanWignerEncodingData( + nSpinOrbitals, fermionTermData, inputStateParam, energyOffset // use the parameterized input state so theta1..theta3 take effect + ); + return EstimateEnergy( + JWEncodedDataParam, nSamples + ); + } +} \ No newline at end of file diff --git a/src/munchkin/tests/qsharp/VQE/VQE.csproj b/src/munchkin/tests/qsharp/VQE/VQE.csproj new file mode 100644 index 0000000..f930576 --- /dev/null +++ b/src/munchkin/tests/qsharp/VQE/VQE.csproj @@ -0,0 +1,22 @@ + + + + Library + net6.0 + true + Detailed + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + Always + + + + diff --git a/src/munchkin/tests/qsharp/VQE/libLLVM.dll b/src/munchkin/tests/qsharp/VQE/libLLVM.dll new file mode 100644 index 0000000..e10836a Binary files /dev/null and b/src/munchkin/tests/qsharp/VQE/libLLVM.dll differ diff --git a/src/munchkin/tests/qsharp/VQE/qir/VQE.ll b/src/munchkin/tests/qsharp/VQE/qir/VQE.ll new file mode 100644 index 0000000..03429b5 --- /dev/null +++ b/src/munchkin/tests/qsharp/VQE/qir/VQE.ll @@ -0,0 +1,31688 @@ + +%Tuple = type opaque +%Array = type opaque +%Callable = type opaque +%Qubit = type opaque +%Result = type opaque +%String = type opaque +%Range = type { i64, i64, i64 } + +@0 = internal constant [36 x i8] c"Qubit in invalid state. 
Expecting: \00" +@1 = internal constant [2 x i8] c"\22\00" +@2 = internal constant [13 x i8] c"\0A\09Expected:\09\00" +@3 = internal constant [5 x i8] c"true\00" +@4 = internal constant [6 x i8] c"false\00" +@5 = internal constant [11 x i8] c"\0A\09Actual:\09\00" +@Microsoft__Quantum__Intrinsic__Reset__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Reset__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@6 = internal constant [75 x i8] c"operation ApplyDiagonalUnitary -- Number of qubits must be greater than 0.\00" +@PartialApplication__1__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctladj__wrapper] +@Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj__wrapper] +@MemoryManagement__1__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__1__RefCount, void (%Tuple*, i32)* @MemoryManagement__1__AliasCount] +@PartialApplication__2__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctladj__wrapper] +@Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj__wrapper] +@MemoryManagement__2__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__2__RefCount, void (%Tuple*, i32)* @MemoryManagement__2__AliasCount] +@Microsoft__Quantum__Intrinsic__H__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__ctladj__wrapper] +@PartialApplication__3__FunctionTable = internal constant [4 x void 
(%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__S__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__ctladj__wrapper] +@7 = internal constant [38 x i8] c"MultiplexPauli failed. Invalid pauli \00" +@8 = internal constant [7 x i8] c"PauliX\00" +@9 = internal constant [7 x i8] c"PauliY\00" +@10 = internal constant [7 x i8] c"PauliZ\00" +@11 = internal constant [7 x i8] c"PauliI\00" +@12 = internal constant [2 x i8] c".\00" +@PartialApplication__4__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctladj__wrapper] +@PartialApplication__5__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctladj__wrapper] +@PartialApplication__6__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctladj__wrapper] +@PartialApplication__7__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__ctladj__wrapper] +@PartialApplication__8__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__ctladj__wrapper] +@PartialApplication__9__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__9__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctladj__wrapper] +@PartialApplication__10__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctladj__wrapper] +@PartialApplication__11__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctladj__wrapper] +@PartialApplication__12__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctladj__wrapper] +@PartialApplication__13__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__13__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__13__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__13__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__13__ctladj__wrapper] +@Microsoft__Quantum__Canon___f4659990d902465a85590edb80da0e54_MultiplexOperationsBruteForceFromGenerator__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f4659990d902465a85590edb80da0e54_MultiplexOperationsBruteForceFromGenerator__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f4659990d902465a85590edb80da0e54_MultiplexOperationsBruteForceFromGenerator__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f4659990d902465a85590edb80da0e54_MultiplexOperationsBruteForceFromGenerator__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f4659990d902465a85590edb80da0e54_MultiplexOperationsBruteForceFromGenerator__ctladj__wrapper] +@MemoryManagement__3__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__3__RefCount, void (%Tuple*, i32)* @MemoryManagement__3__AliasCount] +@PartialApplication__14__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__14__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Canon___bd411897933a412dbc60a337f9d409f8___QsRef2__ComposedOutput____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___bd411897933a412dbc60a337f9d409f8___QsRef2__ComposedOutput____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, 
%Tuple*)* null] +@MemoryManagement__4__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__4__RefCount, void (%Tuple*, i32)* @MemoryManagement__4__AliasCount] +@13 = internal constant [47 x i8] c"Control register shorter than control pattern.\00" +@PartialApplication__15__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__15__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__15__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__15__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__15__ctladj__wrapper] +@Microsoft__Quantum__Canon___fe3fa6d0df8b470d94ee1e5ef782dfc8___QsRef2__ApplyBoundCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___fe3fa6d0df8b470d94ee1e5ef782dfc8___QsRef2__ApplyBoundCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___fe3fa6d0df8b470d94ee1e5ef782dfc8___QsRef2__ApplyBoundCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___fe3fa6d0df8b470d94ee1e5ef782dfc8___QsRef2__ApplyBoundCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___fe3fa6d0df8b470d94ee1e5ef782dfc8___QsRef2__ApplyBoundCA____ctladj__wrapper] +@MemoryManagement__5__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__5__RefCount, void (%Tuple*, i32)* @MemoryManagement__5__AliasCount] +@PartialApplication__16__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__16__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__16__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__16__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__16__ctladj__wrapper] +@Microsoft__Quantum__Canon___66cc0323981f4a52a7db37c456c9273e_ApplyControlledOnInt__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___66cc0323981f4a52a7db37c456c9273e_ApplyControlledOnInt__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___66cc0323981f4a52a7db37c456c9273e_ApplyControlledOnInt__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___66cc0323981f4a52a7db37c456c9273e_ApplyControlledOnInt__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___66cc0323981f4a52a7db37c456c9273e_ApplyControlledOnInt__ctladj__wrapper] +@MemoryManagement__6__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__6__RefCount, void (%Tuple*, i32)* @MemoryManagement__6__AliasCount] +@PartialApplication__17__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__ctladj__wrapper] +@Microsoft__Quantum__Canon___f562e828e774456b904e1952ae2c1ab1_ApplyControlledOnInt__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, 
%Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f562e828e774456b904e1952ae2c1ab1_ApplyControlledOnInt__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f562e828e774456b904e1952ae2c1ab1_ApplyControlledOnInt__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f562e828e774456b904e1952ae2c1ab1_ApplyControlledOnInt__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f562e828e774456b904e1952ae2c1ab1_ApplyControlledOnInt__ctladj__wrapper] +@MemoryManagement__7__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__7__RefCount, void (%Tuple*, i32)* @MemoryManagement__7__AliasCount] +@PartialApplication__18__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__18__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__18__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__18__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__18__ctladj__wrapper] +@Microsoft__Quantum__Canon___9de2894b265e48a892179ea0586e5387_ApplyControlledOnBitString__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___9de2894b265e48a892179ea0586e5387_ApplyControlledOnBitString__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___9de2894b265e48a892179ea0586e5387_ApplyControlledOnBitString__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___9de2894b265e48a892179ea0586e5387_ApplyControlledOnBitString__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___9de2894b265e48a892179ea0586e5387_ApplyControlledOnBitString__ctladj__wrapper] +@MemoryManagement__8__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__8__RefCount, void (%Tuple*, i32)* @MemoryManagement__8__AliasCount] +@PartialApplication__19__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__ctladj__wrapper] +@Microsoft__Quantum__Canon___fa6244d0f10748a4b57c7438a03da38e_ApplyControlledOnBitString__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___fa6244d0f10748a4b57c7438a03da38e_ApplyControlledOnBitString__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___fa6244d0f10748a4b57c7438a03da38e_ApplyControlledOnBitString__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___fa6244d0f10748a4b57c7438a03da38e_ApplyControlledOnBitString__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___fa6244d0f10748a4b57c7438a03da38e_ApplyControlledOnBitString__ctladj__wrapper] +@MemoryManagement__9__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__9__RefCount, void (%Tuple*, i32)* @MemoryManagement__9__AliasCount] +@PartialApplication__20__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__20__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__ctladj__wrapper] +@Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____ctladj__wrapper] +@MemoryManagement__10__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__10__RefCount, void (%Tuple*, i32)* @MemoryManagement__10__AliasCount] +@PartialApplication__21__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__ctladj__wrapper] +@Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____ctladj__wrapper] +@PartialApplication__22__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__ctladj__wrapper] +@Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____ctladj__wrapper] 
+@MemoryManagement__11__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__11__RefCount, void (%Tuple*, i32)* @MemoryManagement__11__AliasCount] +@14 = internal constant [11 x i8] c"Odd order \00" +@15 = internal constant [20 x i8] c" not yet supported.\00" +@PartialApplication__23__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__23__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Simulation____QsRef2___AddGeneratorSystems____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef2___AddGeneratorSystems____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__12__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__12__RefCount, void (%Tuple*, i32)* @MemoryManagement__12__AliasCount] +@Microsoft__Quantum__Simulation__IdentityGeneratorIndex__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Simulation__AddGeneratorSystems__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation__AddGeneratorSystems__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__24__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__ctladj__wrapper] +@Microsoft__Quantum__Simulation____QsRef2__TrotterSimulationAlgorithmImpl____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef2__TrotterSimulationAlgorithmImpl____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef2__TrotterSimulationAlgorithmImpl____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef2__TrotterSimulationAlgorithmImpl____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef2__TrotterSimulationAlgorithmImpl____ctladj__wrapper] +@MemoryManagement__13__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__13__RefCount, void (%Tuple*, i32)* @MemoryManagement__13__AliasCount] +@PartialApplication__25__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__ctl__wrapper, void (%Tuple*, %Tuple*, 
%Tuple*)* @Lifted__PartialApplication__25__ctladj__wrapper] +@Microsoft__Quantum__Simulation____QsRef2__TrotterStepImpl____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef2__TrotterStepImpl____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef2__TrotterStepImpl____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef2__TrotterStepImpl____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef2__TrotterStepImpl____ctladj__wrapper] +@MemoryManagement__14__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__14__RefCount, void (%Tuple*, i32)* @MemoryManagement__14__AliasCount] +@PartialApplication__26__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__ctladj__wrapper] +@MemoryManagement__15__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__15__RefCount, void (%Tuple*, i32)* @MemoryManagement__15__AliasCount] +@16 = internal constant [39 x i8] c"Array must be of the length at least 1\00" +@PartialApplication__27__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__27__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Arrays___baa85e836eb8473188447f7dd40ddae1_ElementAt__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Arrays___baa85e836eb8473188447f7dd40ddae1_ElementAt__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__16__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__16__RefCount, void (%Tuple*, i32)* @MemoryManagement__16__AliasCount] +@17 = internal constant [22 x i8] c"Index is out of bound\00" +@18 = internal constant [71 x i8] c"Specified output array length must be longer than `inputArray` length.\00" +@19 = internal constant [33 x i8] c"`bits` must be between 0 and 63 \00" +@20 = internal constant [34 x i8] c"`number` must be between 0 and 2^\00" +@21 = internal constant [15 x i8] c" - 1, but was \00" +@PartialApplication__28__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__ctladj__wrapper] +@Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* 
@Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____ctladj__wrapper] +@MemoryManagement__17__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__17__RefCount, void (%Tuple*, i32)* @MemoryManagement__17__AliasCount] +@PartialApplication__29__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__ctladj__wrapper] +@PartialApplication__30__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__ctladj__wrapper] +@Microsoft__Quantum__Preparation____QsRef2__ApplyGlobalRotationStep____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef2__ApplyGlobalRotationStep____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef2__ApplyGlobalRotationStep____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef2__ApplyGlobalRotationStep____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef2__ApplyGlobalRotationStep____ctladj__wrapper] +@MemoryManagement__18__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__18__RefCount, void (%Tuple*, i32)* @MemoryManagement__18__AliasCount] +@PartialApplication__31__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__31__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__31__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__31__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__31__ctladj__wrapper] +@Microsoft__Quantum__Preparation____QsRef2__ApplyToLittleEndian____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef2__ApplyToLittleEndian____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef2__ApplyToLittleEndian____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef2__ApplyToLittleEndian____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef2__ApplyToLittleEndian____ctladj__wrapper] +@MemoryManagement__19__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__19__RefCount, void (%Tuple*, i32)* @MemoryManagement__19__AliasCount] +@PartialApplication__32__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__32__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Math__ComplexPolar__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Math__ComplexPolar__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__20__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__20__RefCount, void (%Tuple*, i32)* @MemoryManagement__20__AliasCount] +@Microsoft__Quantum__Math__AbsD__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Math__AbsD__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__33__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__33__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__34__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__34__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__35__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__35__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@22 = internal constant [46 x i8] c"Unitary coupled-cluster PQRS failed: indices \00" +@23 = internal constant [3 x i8] c", \00" +@24 = internal constant [18 x i8] c" must be distinct\00" +@25 = internal constant [44 x i8] c"Unitary coupled-cluster PQ failed: indices \00" +@PartialApplication__36__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__36__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__36__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__36__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__36__ctladj__wrapper] +@Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorImpl____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorImpl____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorImpl____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorImpl____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorImpl____ctladj__wrapper] +@MemoryManagement__21__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__21__RefCount, void (%Tuple*, i32)* @MemoryManagement__21__AliasCount] 
+@PartialApplication__37__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__37__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__37__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__37__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__37__ctladj__wrapper] +@Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctladj__wrapper] +@MemoryManagement__22__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__22__RefCount, void (%Tuple*, i32)* @MemoryManagement__22__AliasCount] +@26 = internal constant [86 x i8] c"ComputeJordanWignerString failed. `idxFermions` must contain an even number of terms.\00" +@27 = internal constant [46 x i8] c"ComputeJordanWignerString failed. fermionIdx \00" +@28 = internal constant [15 x i8] c" out of range.\00" +@29 = internal constant [47 x i8] c"Completely invalid cluster operator specified.\00" +@Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorFunction____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorFunction____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__38__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__38__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Chemistry__JordanWigner____QsRef4__JordanWignerStateAsGeneratorIndex____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4__JordanWignerStateAsGeneratorIndex____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__23__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__23__RefCount, void (%Tuple*, i32)* @MemoryManagement__23__AliasCount] +@Microsoft__Quantum__Intrinsic__X__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* 
@Microsoft__Quantum__Intrinsic__X__ctladj__wrapper] +@Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___PrepareSingleConfigurationalStateSingleSiteOccupation____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___PrepareSingleConfigurationalStateSingleSiteOccupation____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Canon___5efc0405668d4c768edc4328b6eb53a1_NoOp__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___5efc0405668d4c768edc4328b6eb53a1_NoOp__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___5efc0405668d4c768edc4328b6eb53a1_NoOp__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___5efc0405668d4c768edc4328b6eb53a1_NoOp__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___5efc0405668d4c768edc4328b6eb53a1_NoOp__ctladj__wrapper] +@PartialApplication__39__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__39__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__24__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__24__RefCount, void (%Tuple*, i32)* @MemoryManagement__24__AliasCount] +@PartialApplication__40__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__40__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__40__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__40__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__40__ctladj__wrapper] +@MemoryManagement__25__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__25__RefCount, void (%Tuple*, i32)* @MemoryManagement__25__AliasCount] +@PartialApplication__41__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__41__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__41__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef4___PrepareTrialState____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef4___PrepareTrialState____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef4___PrepareTrialState____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__26__FunctionTable = internal constant [2 x void 
(%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__26__RefCount, void (%Tuple*, i32)* @MemoryManagement__26__AliasCount] +@PartialApplication__42__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__42__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Intrinsic__Measure__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Measure__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__27__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__27__RefCount, void (%Tuple*, i32)* @MemoryManagement__27__AliasCount] +@PartialApplication__43__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__43__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Chemistry__HTermsToGenIdx__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__HTermsToGenIdx__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__28__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__28__RefCount, void (%Tuple*, i32)* @MemoryManagement__28__AliasCount] + +define double @Microsoft__Quantum__Chemistry__VQE__GetEnergyVQE__body({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedData, double %theta1, double %theta2, double %theta3, i64 %nSamples) { +entry: + %0 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedData, i32 0, i32 1 + %fermionTermData = load { %Array*, %Array*, %Array*, %Array* }*, { %Array*, %Array*, %Array*, %Array* }** %0, align 8 + %1 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %2) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %5) + %8 = bitcast i8* %7 to { %Array*, %Array* }** + %9 = load { %Array*, %Array* }*, { %Array*, %Array* }** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array*, %Array* }* %9 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call i64 @__quantum__rt__array_get_size_1d(%Array* %17) + %19 = sub i64 %18, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %20) + %23 = bitcast i8* %22 to { %Array*, %Array* }** + %24 = load { %Array*, %Array* }*, { %Array*, %Array* }** %23, align 8 + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { %Array*, %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %31 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 2 + %32 = load %Array*, %Array** %31, align 8 + %33 = call i64 @__quantum__rt__array_get_size_1d(%Array* %32) + %34 = sub i64 %33, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %45, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %35) + %38 = bitcast i8* %37 to { %Array*, %Array* }** + %39 = load { %Array*, %Array* }*, { %Array*, %Array* }** %38, align 8 + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 0 + %41 = load %Array*, %Array** %40, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1) + %42 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 1 + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + %44 = bitcast { %Array*, %Array* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %44, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %45 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %46 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 3 + %47 = load %Array*, %Array** %46, align 8 + %48 = call i64 @__quantum__rt__array_get_size_1d(%Array* %47) + %49 = sub i64 %48, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %50 = phi i64 [ 0, %exit__3 
], [ %60, %exiting__4 ] + %51 = icmp sle i64 %50, %49 + br i1 %51, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %50) + %53 = bitcast i8* %52 to { %Array*, %Array* }** + %54 = load { %Array*, %Array* }*, { %Array*, %Array* }** %53, align 8 + %55 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 0 + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + %57 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 1 + %58 = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 1) + %59 = bitcast { %Array*, %Array* }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %59, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %60 = add i64 %50, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + %61 = bitcast { %Array*, %Array*, %Array*, %Array* }* %fermionTermData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %62 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedData, i32 0, i32 2 + %inputState = load { i64, %Array* }*, { i64, %Array* }** %62, align 8 + %63 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputState, i32 0, i32 1 + %JWInputStates = load %Array*, %Array** %63, align 8 + %64 = call i64 @__quantum__rt__array_get_size_1d(%Array* %JWInputStates) + %65 = sub i64 %64, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %66 = phi i64 [ 0, %exit__4 ], [ %77, %exiting__5 ] + %67 = icmp sle i64 %66, %65 + br i1 %67, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 %66) + %69 = bitcast i8* %68 to { { double, double }*, %Array* }** + %70 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %69, align 8 + %71 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %70, i32 0, i32 0 + %72 = load { double, double }*, { double, double }** %71, align 8 + %73 = bitcast { double, double }* %72 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %73, i32 1) + %74 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %70, i32 0, i32 1 + %75 = load %Array*, %Array** %74, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %75, i32 1) + %76 = bitcast { { double, double }*, %Array* }* %70 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %76, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %77 = add i64 %66, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %JWInputStates, i32 1) + %78 = bitcast { i64, %Array* }* %inputState to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 1) + %79 = bitcast { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + %80 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, 
{ i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedData, i32 0, i32 0 + %nSpinOrbitals = load i64, i64* %80, align 4 + %81 = sub i64 %3, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %82 = phi i64 [ 0, %exit__5 ], [ %92, %exiting__6 ] + %83 = icmp sle i64 %82, %81 + br i1 %83, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %82) + %85 = bitcast i8* %84 to { %Array*, %Array* }** + %86 = load { %Array*, %Array* }*, { %Array*, %Array* }** %85, align 8 + %87 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %86, i32 0, i32 0 + %88 = load %Array*, %Array** %87, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %88, i32 1) + %89 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %86, i32 0, i32 1 + %90 = load %Array*, %Array** %89, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %90, i32 1) + %91 = bitcast { %Array*, %Array* }* %86 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %91, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %92 = add i64 %82, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %93 = sub i64 %18, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %94 = phi i64 [ 0, %exit__6 ], [ %104, %exiting__7 ] + %95 = icmp sle i64 %94, %93 + br i1 %95, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %94) + %97 = bitcast i8* %96 to { %Array*, %Array* }** + %98 = load { %Array*, %Array* }*, { %Array*, %Array* }** %97, align 8 + %99 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %98, i32 0, i32 0 + %100 = load %Array*, %Array** %99, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %100, i32 1) + %101 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %98, i32 0, i32 1 + %102 = load %Array*, %Array** %101, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %102, i32 1) + %103 = bitcast { %Array*, %Array* }* %98 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %103, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %104 = add i64 %94, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %105 = sub i64 %33, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %106 = phi i64 [ 0, %exit__7 ], [ %116, %exiting__8 ] + %107 = icmp sle i64 %106, %105 + br i1 %107, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %106) + %109 = bitcast i8* %108 to { %Array*, %Array* }** + %110 = load { %Array*, %Array* }*, { %Array*, %Array* }** %109, align 8 + %111 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %110, i32 0, i32 0 + %112 = load %Array*, %Array** %111, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %112, i32 1) + %113 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %110, i32 0, i32 1 + %114 = load %Array*, %Array** %113, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %114, i32 1) + %115 = bitcast { %Array*, %Array* }* %110 to 
%Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %115, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %116 = add i64 %106, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %117 = sub i64 %48, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %118 = phi i64 [ 0, %exit__8 ], [ %128, %exiting__9 ] + %119 = icmp sle i64 %118, %117 + br i1 %119, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %120 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %118) + %121 = bitcast i8* %120 to { %Array*, %Array* }** + %122 = load { %Array*, %Array* }*, { %Array*, %Array* }** %121, align 8 + %123 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %122, i32 0, i32 0 + %124 = load %Array*, %Array** %123, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %124, i32 1) + %125 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %122, i32 0, i32 1 + %126 = load %Array*, %Array** %125, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %126, i32 1) + %127 = bitcast { %Array*, %Array* }* %122 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %127, i32 1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %128 = add i64 %118, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %129 = sub i64 %64, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %130 = phi i64 [ 0, %exit__9 ], [ %141, %exiting__10 ] + %131 = icmp sle i64 %130, %129 + br i1 %131, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %132 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 %130) + %133 = bitcast i8* %132 to { { double, double }*, %Array* }** + %134 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %133, align 8 + %135 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %134, i32 0, i32 0 + %136 = load { double, double }*, { double, double }** %135, align 8 + %137 = bitcast { double, double }* %136 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %137, i32 1) + %138 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %134, i32 0, i32 1 + %139 = load %Array*, %Array** %138, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %139, i32 1) + %140 = bitcast { { double, double }*, %Array* }* %134 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %140, i32 1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %141 = add i64 %130, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %JWInputStates, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 1) + %142 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedData, i32 0, i32 3 + %energyOffset = load double, double* %142, align 8 + %143 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputState, i32 0, i32 0 + %stateType = load i64, i64* %143, align 4 + %144 = sub i64 %64, 1 + br 
label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %145 = phi i64 [ 0, %exit__10 ], [ %156, %exiting__11 ] + %146 = icmp sle i64 %145, %144 + br i1 %146, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %147 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 %145) + %148 = bitcast i8* %147 to { { double, double }*, %Array* }** + %149 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %148, align 8 + %150 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %149, i32 0, i32 0 + %151 = load { double, double }*, { double, double }** %150, align 8 + %152 = bitcast { double, double }* %151 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %152, i32 1) + %153 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %149, i32 0, i32 1 + %154 = load %Array*, %Array** %153, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %154, i32 1) + %155 = bitcast { { double, double }*, %Array* }* %149 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %155, i32 1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %156 = add i64 %145, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %JWInputStates, i32 1) + %157 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %158 = bitcast %Tuple* %157 to { double, double }* + %159 = getelementptr inbounds { double, double }, { double, double }* %158, i32 0, i32 0 + %160 = getelementptr inbounds { double, double }, { double, double }* %158, i32 0, i32 1 + store double %theta1, double* %159, align 8 + store double 0.000000e+00, double* %160, align 8 + %161 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %162 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %161, i64 0) + %163 = bitcast i8* %162 to i64* + %164 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %161, i64 1) + %165 = bitcast i8* %164 to i64* + store i64 2, i64* %163, align 4 + store i64 0, i64* %165, align 4 + %166 = call { { double, double }*, %Array* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerInputState__body({ double, double }* %158, %Array* %161) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %157, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %161, i32 -1) + %167 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %168 = bitcast %Tuple* %167 to { double, double }* + %169 = getelementptr inbounds { double, double }, { double, double }* %168, i32 0, i32 0 + %170 = getelementptr inbounds { double, double }, { double, double }* %168, i32 0, i32 1 + store double %theta2, double* %169, align 8 + store double 0.000000e+00, double* %170, align 8 + %171 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %172 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %171, i64 0) + %173 = bitcast i8* %172 to i64* + %174 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %171, i64 1) + %175 = bitcast i8* %174 to i64* + store i64 3, i64* %173, align 4 + store i64 1, i64* %175, align 4 + %176 = call { { double, double }*, %Array* }* 
@Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerInputState__body({ double, double }* %168, %Array* %171) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %167, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %171, i32 -1) + %177 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %178 = bitcast %Tuple* %177 to { double, double }* + %179 = getelementptr inbounds { double, double }, { double, double }* %178, i32 0, i32 0 + %180 = getelementptr inbounds { double, double }, { double, double }* %178, i32 0, i32 1 + store double %theta3, double* %179, align 8 + store double 0.000000e+00, double* %180, align 8 + %181 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %182 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %181, i64 0) + %183 = bitcast i8* %182 to i64* + %184 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %181, i64 1) + %185 = bitcast i8* %184 to i64* + %186 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %181, i64 2) + %187 = bitcast i8* %186 to i64* + %188 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %181, i64 3) + %189 = bitcast i8* %188 to i64* + store i64 2, i64* %183, align 4 + store i64 3, i64* %185, align 4 + store i64 1, i64* %187, align 4 + store i64 0, i64* %189, align 4 + %190 = call { { double, double }*, %Array* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerInputState__body({ double, double }* %178, %Array* %181) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %177, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %181, i32 -1) + %191 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 0) + %192 = bitcast i8* %191 to { { double, double }*, %Array* }** + %193 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %192, align 8 + %194 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %193, i32 0, i32 0 + %195 = load { double, double }*, { double, double }** %194, align 8 + %196 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %193, i32 0, i32 1 + %197 = load %Array*, %Array** %196, align 8 + %198 = bitcast { double, double }* %195 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %198, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %197, i32 1) + %199 = bitcast { { double, double }*, %Array* }* %193 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %199, i32 1) + %200 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %201 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 0) + %202 = bitcast i8* %201 to { { double, double }*, %Array* }** + %203 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 1) + %204 = bitcast i8* %203 to { { double, double }*, %Array* }** + %205 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 2) + %206 = bitcast i8* %205 to { { double, double }*, %Array* }** + %207 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 3) + %208 = bitcast i8* %207 to { { double, double }*, %Array* }** + store { { double, double }*, %Array* }* %166, { { double, double }*, %Array* }** %202, align 8 + store { { double, double }*, %Array* }* %176, { { double, double }*, %Array* }** %204, align 8 + store 
{ { double, double }*, %Array* }* %190, { { double, double }*, %Array* }** %206, align 8 + store { { double, double }*, %Array* }* %193, { { double, double }*, %Array* }** %208, align 8 + %209 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %inputStateParam = bitcast %Tuple* %209 to { i64, %Array* }* + %210 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputStateParam, i32 0, i32 0 + %211 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputStateParam, i32 0, i32 1 + store i64 %stateType, i64* %210, align 4 + store %Array* %200, %Array** %211, align 8 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %212 = phi i64 [ 0, %exit__11 ], [ %223, %exiting__12 ] + %213 = icmp sle i64 %212, 3 + br i1 %213, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 %212) + %215 = bitcast i8* %214 to { { double, double }*, %Array* }** + %216 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %215, align 8 + %217 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %216, i32 0, i32 0 + %218 = load { double, double }*, { double, double }** %217, align 8 + %219 = bitcast { double, double }* %218 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %219, i32 1) + %220 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %216, i32 0, i32 1 + %221 = load %Array*, %Array** %220, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %221, i32 1) + %222 = bitcast { { double, double }*, %Array* }* %216 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %222, i32 1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %223 = add i64 %212, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %200, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %209, i32 1) + %JWEncodedDataParam = call { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerEncodingData__body(i64 %nSpinOrbitals, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, { i64, %Array* }* %inputState, double %energyOffset) + %224 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedDataParam, i32 0, i32 1 + %225 = load { %Array*, %Array*, %Array*, %Array* }*, { %Array*, %Array*, %Array*, %Array* }** %224, align 8 + %226 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %225, i32 0, i32 0 + %227 = load %Array*, %Array** %226, align 8 + %228 = call i64 @__quantum__rt__array_get_size_1d(%Array* %227) + %229 = sub i64 %228, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %230 = phi i64 [ 0, %exit__12 ], [ %240, %exiting__13 ] + %231 = icmp sle i64 %230, %229 + br i1 %231, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %232 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %227, i64 %230) + %233 = bitcast i8* %232 to { %Array*, %Array* }** + %234 = load { %Array*, %Array* }*, { %Array*, %Array* }** %233, align 8 + %235 = getelementptr 
inbounds { %Array*, %Array* }, { %Array*, %Array* }* %234, i32 0, i32 0 + %236 = load %Array*, %Array** %235, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %236, i32 1) + %237 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %234, i32 0, i32 1 + %238 = load %Array*, %Array** %237, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %238, i32 1) + %239 = bitcast { %Array*, %Array* }* %234 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %239, i32 1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %240 = add i64 %230, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_alias_count(%Array* %227, i32 1) + %241 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %225, i32 0, i32 1 + %242 = load %Array*, %Array** %241, align 8 + %243 = call i64 @__quantum__rt__array_get_size_1d(%Array* %242) + %244 = sub i64 %243, 1 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %245 = phi i64 [ 0, %exit__13 ], [ %255, %exiting__14 ] + %246 = icmp sle i64 %245, %244 + br i1 %246, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %247 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %242, i64 %245) + %248 = bitcast i8* %247 to { %Array*, %Array* }** + %249 = load { %Array*, %Array* }*, { %Array*, %Array* }** %248, align 8 + %250 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %249, i32 0, i32 0 + %251 = load %Array*, %Array** %250, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %251, i32 1) + %252 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %249, i32 0, i32 1 + %253 = load %Array*, %Array** %252, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %253, i32 1) + %254 = bitcast { %Array*, %Array* }* %249 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %254, i32 1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %255 = add i64 %245, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_alias_count(%Array* %242, i32 1) + %256 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %225, i32 0, i32 2 + %257 = load %Array*, %Array** %256, align 8 + %258 = call i64 @__quantum__rt__array_get_size_1d(%Array* %257) + %259 = sub i64 %258, 1 + br label %header__15 + +header__15: ; preds = %exiting__15, %exit__14 + %260 = phi i64 [ 0, %exit__14 ], [ %270, %exiting__15 ] + %261 = icmp sle i64 %260, %259 + br i1 %261, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %262 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %257, i64 %260) + %263 = bitcast i8* %262 to { %Array*, %Array* }** + %264 = load { %Array*, %Array* }*, { %Array*, %Array* }** %263, align 8 + %265 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %264, i32 0, i32 0 + %266 = load %Array*, %Array** %265, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %266, i32 1) + %267 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %264, i32 0, i32 1 + %268 = load %Array*, %Array** %267, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %268, i32 1) + %269 = bitcast { %Array*, %Array* }* %264 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %269, i32 1) + br label 
%exiting__15 + +exiting__15: ; preds = %body__15 + %270 = add i64 %260, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_alias_count(%Array* %257, i32 1) + %271 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %225, i32 0, i32 3 + %272 = load %Array*, %Array** %271, align 8 + %273 = call i64 @__quantum__rt__array_get_size_1d(%Array* %272) + %274 = sub i64 %273, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %275 = phi i64 [ 0, %exit__15 ], [ %285, %exiting__16 ] + %276 = icmp sle i64 %275, %274 + br i1 %276, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %277 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %272, i64 %275) + %278 = bitcast i8* %277 to { %Array*, %Array* }** + %279 = load { %Array*, %Array* }*, { %Array*, %Array* }** %278, align 8 + %280 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %279, i32 0, i32 0 + %281 = load %Array*, %Array** %280, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %281, i32 1) + %282 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %279, i32 0, i32 1 + %283 = load %Array*, %Array** %282, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %283, i32 1) + %284 = bitcast { %Array*, %Array* }* %279 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %284, i32 1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %285 = add i64 %275, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_alias_count(%Array* %272, i32 1) + %286 = bitcast { %Array*, %Array*, %Array*, %Array* }* %225 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %286, i32 1) + %287 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedDataParam, i32 0, i32 2 + %288 = load { i64, %Array* }*, { i64, %Array* }** %287, align 8 + %289 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %288, i32 0, i32 1 + %290 = load %Array*, %Array** %289, align 8 + %291 = call i64 @__quantum__rt__array_get_size_1d(%Array* %290) + %292 = sub i64 %291, 1 + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %293 = phi i64 [ 0, %exit__16 ], [ %304, %exiting__17 ] + %294 = icmp sle i64 %293, %292 + br i1 %294, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %295 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %290, i64 %293) + %296 = bitcast i8* %295 to { { double, double }*, %Array* }** + %297 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %296, align 8 + %298 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %297, i32 0, i32 0 + %299 = load { double, double }*, { double, double }** %298, align 8 + %300 = bitcast { double, double }* %299 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %300, i32 1) + %301 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %297, i32 0, i32 1 + %302 = load %Array*, %Array** %301, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %302, i32 1) + %303 = bitcast { { double, double }*, %Array* }* %297 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %303, i32 1) + br 
label %exiting__17 + +exiting__17: ; preds = %body__17 + %304 = add i64 %293, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %290, i32 1) + %305 = bitcast { i64, %Array* }* %288 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %305, i32 1) + %306 = bitcast { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedDataParam to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %306, i32 1) + %307 = call double @Microsoft__Quantum__Chemistry__JordanWigner__VQE__EstimateEnergy__body({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedDataParam, i64 %nSamples) + %308 = sub i64 %3, 1 + br label %header__18 + +header__18: ; preds = %exiting__18, %exit__17 + %309 = phi i64 [ 0, %exit__17 ], [ %319, %exiting__18 ] + %310 = icmp sle i64 %309, %308 + br i1 %310, label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %311 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %309) + %312 = bitcast i8* %311 to { %Array*, %Array* }** + %313 = load { %Array*, %Array* }*, { %Array*, %Array* }** %312, align 8 + %314 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %313, i32 0, i32 0 + %315 = load %Array*, %Array** %314, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %315, i32 -1) + %316 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %313, i32 0, i32 1 + %317 = load %Array*, %Array** %316, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %317, i32 -1) + %318 = bitcast { %Array*, %Array* }* %313 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %318, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = %body__18 + %319 = add i64 %309, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %320 = sub i64 %18, 1 + br label %header__19 + +header__19: ; preds = %exiting__19, %exit__18 + %321 = phi i64 [ 0, %exit__18 ], [ %331, %exiting__19 ] + %322 = icmp sle i64 %321, %320 + br i1 %322, label %body__19, label %exit__19 + +body__19: ; preds = %header__19 + %323 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %321) + %324 = bitcast i8* %323 to { %Array*, %Array* }** + %325 = load { %Array*, %Array* }*, { %Array*, %Array* }** %324, align 8 + %326 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %325, i32 0, i32 0 + %327 = load %Array*, %Array** %326, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %327, i32 -1) + %328 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %325, i32 0, i32 1 + %329 = load %Array*, %Array** %328, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %329, i32 -1) + %330 = bitcast { %Array*, %Array* }* %325 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %330, i32 -1) + br label %exiting__19 + +exiting__19: ; preds = %body__19 + %331 = add i64 %321, 1 + br label %header__19 + +exit__19: ; preds = %header__19 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %332 = sub i64 %33, 1 + br label %header__20 + +header__20: ; preds = %exiting__20, %exit__19 + %333 = phi i64 [ 0, %exit__19 ], [ %343, %exiting__20 ] + %334 = icmp sle i64 %333, %332 + br i1 %334, label %body__20, label %exit__20 + +body__20: ; preds = %header__20 + %335 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %333) + %336 = bitcast i8* %335 to { %Array*, %Array* }** + %337 = load { %Array*, %Array* }*, { %Array*, %Array* }** %336, align 8 + %338 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %337, i32 0, i32 0 + %339 = load %Array*, %Array** %338, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %339, i32 -1) + %340 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %337, i32 0, i32 1 + %341 = load %Array*, %Array** %340, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %341, i32 -1) + %342 = bitcast { %Array*, %Array* }* %337 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %342, i32 -1) + br label %exiting__20 + +exiting__20: ; preds = %body__20 + %343 = add i64 %333, 1 + br label %header__20 + +exit__20: ; preds = %header__20 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %344 = sub i64 %48, 1 + br label %header__21 + +header__21: ; preds = %exiting__21, %exit__20 + %345 = phi i64 [ 0, %exit__20 ], [ %355, %exiting__21 ] + %346 = icmp sle i64 %345, %344 + br i1 %346, label %body__21, label %exit__21 + +body__21: ; preds = %header__21 + %347 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %345) + %348 = bitcast i8* %347 to { %Array*, %Array* }** + %349 = load { %Array*, %Array* }*, { %Array*, %Array* }** %348, align 8 + %350 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %349, i32 0, i32 0 + %351 = load %Array*, %Array** %350, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %351, i32 -1) + %352 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %349, i32 0, i32 1 + %353 = load %Array*, %Array** %352, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %353, i32 -1) + %354 = bitcast { %Array*, %Array* }* %349 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %354, i32 -1) + br label %exiting__21 + +exiting__21: ; preds = %body__21 + %355 = add i64 %345, 1 + br label %header__21 + +exit__21: ; preds = %header__21 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %356 = sub i64 %64, 1 + br label %header__22 + +header__22: ; preds = %exiting__22, %exit__21 + %357 = phi i64 [ 0, %exit__21 ], [ %368, %exiting__22 ] + %358 = icmp sle i64 %357, %356 + br i1 %358, label %body__22, label %exit__22 + +body__22: ; preds = %header__22 + %359 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 %357) + %360 = bitcast i8* %359 to { { double, double }*, %Array* }** + %361 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %360, align 8 + %362 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %361, i32 0, i32 0 + %363 = load { double, double }*, { double, double }** %362, align 8 + %364 = bitcast { double, double }* %363 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %364, i32 -1) + %365 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %361, i32 0, i32 1 + %366 = load %Array*, %Array** %365, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %366, i32 -1) + %367 = bitcast { { double, double }*, %Array* }* %361 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %367, i32 -1) + br label %exiting__22 + 
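+; NB: the comment below is an annotation for readability; the surrounding IR is
+; unmodified Q#-compiler output. Every loop in this entry point follows the
+; compiler's standard counted shape: header__N tests the induction variable,
+; body__N visits one array element, exiting__N increments the counter, and
+; exit__N updates the count on the array itself. The paired +1/-1 calls to
+; __quantum__rt__array_update_alias_count and __quantum__rt__tuple_update_alias_count
+; mark values as aliased by local bindings (driving copy-on-write decisions in
+; the QIR runtime), while the *_update_reference_count calls manage the
+; lifetime of heap-allocated %Tuple and %Array values.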
+exiting__22: ; preds = %body__22 + %368 = add i64 %357, 1 + br label %header__22 + +exit__22: ; preds = %header__22 + call void @__quantum__rt__array_update_alias_count(%Array* %JWInputStates, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 -1) + %369 = sub i64 %3, 1 + br label %header__23 + +header__23: ; preds = %exiting__23, %exit__22 + %370 = phi i64 [ 0, %exit__22 ], [ %380, %exiting__23 ] + %371 = icmp sle i64 %370, %369 + br i1 %371, label %body__23, label %exit__23 + +body__23: ; preds = %header__23 + %372 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %370) + %373 = bitcast i8* %372 to { %Array*, %Array* }** + %374 = load { %Array*, %Array* }*, { %Array*, %Array* }** %373, align 8 + %375 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %374, i32 0, i32 0 + %376 = load %Array*, %Array** %375, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %376, i32 -1) + %377 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %374, i32 0, i32 1 + %378 = load %Array*, %Array** %377, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %378, i32 -1) + %379 = bitcast { %Array*, %Array* }* %374 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %379, i32 -1) + br label %exiting__23 + +exiting__23: ; preds = %body__23 + %380 = add i64 %370, 1 + br label %header__23 + +exit__23: ; preds = %header__23 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %381 = sub i64 %18, 1 + br label %header__24 + +header__24: ; preds = %exiting__24, %exit__23 + %382 = phi i64 [ 0, %exit__23 ], [ %392, %exiting__24 ] + %383 = icmp sle i64 %382, %381 + br i1 %383, label %body__24, label %exit__24 + +body__24: ; preds = %header__24 + %384 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %382) + %385 = bitcast i8* %384 to { %Array*, %Array* }** + %386 = load { %Array*, %Array* }*, { %Array*, %Array* }** %385, align 8 + %387 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %386, i32 0, i32 0 + %388 = load %Array*, %Array** %387, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %388, i32 -1) + %389 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %386, i32 0, i32 1 + %390 = load %Array*, %Array** %389, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %390, i32 -1) + %391 = bitcast { %Array*, %Array* }* %386 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %391, i32 -1) + br label %exiting__24 + +exiting__24: ; preds = %body__24 + %392 = add i64 %382, 1 + br label %header__24 + +exit__24: ; preds = %header__24 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %393 = sub i64 %33, 1 + br label %header__25 + +header__25: ; preds = %exiting__25, %exit__24 + %394 = phi i64 [ 0, %exit__24 ], [ %404, %exiting__25 ] + %395 = icmp sle i64 %394, %393 + br i1 %395, label %body__25, label %exit__25 + +body__25: ; preds = %header__25 + %396 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %394) + %397 = bitcast i8* %396 to { %Array*, %Array* }** + %398 = load { %Array*, %Array* }*, { %Array*, %Array* }** %397, align 8 + %399 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %398, i32 0, i32 0 + %400 = load %Array*, %Array** %399, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %400, i32 -1) + 
%401 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %398, i32 0, i32 1 + %402 = load %Array*, %Array** %401, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %402, i32 -1) + %403 = bitcast { %Array*, %Array* }* %398 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %403, i32 -1) + br label %exiting__25 + +exiting__25: ; preds = %body__25 + %404 = add i64 %394, 1 + br label %header__25 + +exit__25: ; preds = %header__25 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %405 = sub i64 %48, 1 + br label %header__26 + +header__26: ; preds = %exiting__26, %exit__25 + %406 = phi i64 [ 0, %exit__25 ], [ %416, %exiting__26 ] + %407 = icmp sle i64 %406, %405 + br i1 %407, label %body__26, label %exit__26 + +body__26: ; preds = %header__26 + %408 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %406) + %409 = bitcast i8* %408 to { %Array*, %Array* }** + %410 = load { %Array*, %Array* }*, { %Array*, %Array* }** %409, align 8 + %411 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %410, i32 0, i32 0 + %412 = load %Array*, %Array** %411, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %412, i32 -1) + %413 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %410, i32 0, i32 1 + %414 = load %Array*, %Array** %413, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %414, i32 -1) + %415 = bitcast { %Array*, %Array* }* %410 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %415, i32 -1) + br label %exiting__26 + +exiting__26: ; preds = %body__26 + %416 = add i64 %406, 1 + br label %header__26 + +exit__26: ; preds = %header__26 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %417 = sub i64 %64, 1 + br label %header__27 + +header__27: ; preds = %exiting__27, %exit__26 + %418 = phi i64 [ 0, %exit__26 ], [ %429, %exiting__27 ] + %419 = icmp sle i64 %418, %417 + br i1 %419, label %body__27, label %exit__27 + +body__27: ; preds = %header__27 + %420 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 %418) + %421 = bitcast i8* %420 to { { double, double }*, %Array* }** + %422 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %421, align 8 + %423 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %422, i32 0, i32 0 + %424 = load { double, double }*, { double, double }** %423, align 8 + %425 = bitcast { double, double }* %424 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %425, i32 -1) + %426 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %422, i32 0, i32 1 + %427 = load %Array*, %Array** %426, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %427, i32 -1) + %428 = bitcast { { double, double }*, %Array* }* %422 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %428, i32 -1) + br label %exiting__27 + +exiting__27: ; preds = %body__27 + %429 = add i64 %418, 1 + br label %header__27 + +exit__27: ; preds = %header__27 + call void @__quantum__rt__array_update_alias_count(%Array* %JWInputStates, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 -1) + %430 = sub i64 %64, 1 + br label %header__28 + +header__28: ; preds = %exiting__28, %exit__27 + %431 = phi i64 [ 0, %exit__27 ], [ %442, 
%exiting__28 ] + %432 = icmp sle i64 %431, %430 + br i1 %432, label %body__28, label %exit__28 + +body__28: ; preds = %header__28 + %433 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 %431) + %434 = bitcast i8* %433 to { { double, double }*, %Array* }** + %435 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %434, align 8 + %436 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %435, i32 0, i32 0 + %437 = load { double, double }*, { double, double }** %436, align 8 + %438 = bitcast { double, double }* %437 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %438, i32 -1) + %439 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %435, i32 0, i32 1 + %440 = load %Array*, %Array** %439, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %440, i32 -1) + %441 = bitcast { { double, double }*, %Array* }* %435 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %441, i32 -1) + br label %exiting__28 + +exiting__28: ; preds = %body__28 + %442 = add i64 %431, 1 + br label %header__28 + +exit__28: ; preds = %header__28 + call void @__quantum__rt__array_update_alias_count(%Array* %JWInputStates, i32 -1) + br label %header__29 + +header__29: ; preds = %exiting__29, %exit__28 + %443 = phi i64 [ 0, %exit__28 ], [ %454, %exiting__29 ] + %444 = icmp sle i64 %443, 3 + br i1 %444, label %body__29, label %exit__29 + +body__29: ; preds = %header__29 + %445 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 %443) + %446 = bitcast i8* %445 to { { double, double }*, %Array* }** + %447 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %446, align 8 + %448 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %447, i32 0, i32 0 + %449 = load { double, double }*, { double, double }** %448, align 8 + %450 = bitcast { double, double }* %449 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %450, i32 -1) + %451 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %447, i32 0, i32 1 + %452 = load %Array*, %Array** %451, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %452, i32 -1) + %453 = bitcast { { double, double }*, %Array* }* %447 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %453, i32 -1) + br label %exiting__29 + +exiting__29: ; preds = %body__29 + %454 = add i64 %443, 1 + br label %header__29 + +exit__29: ; preds = %header__29 + call void @__quantum__rt__array_update_alias_count(%Array* %200, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %209, i32 -1) + %455 = sub i64 %228, 1 + br label %header__30 + +header__30: ; preds = %exiting__30, %exit__29 + %456 = phi i64 [ 0, %exit__29 ], [ %466, %exiting__30 ] + %457 = icmp sle i64 %456, %455 + br i1 %457, label %body__30, label %exit__30 + +body__30: ; preds = %header__30 + %458 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %227, i64 %456) + %459 = bitcast i8* %458 to { %Array*, %Array* }** + %460 = load { %Array*, %Array* }*, { %Array*, %Array* }** %459, align 8 + %461 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %460, i32 0, i32 0 + %462 = load %Array*, %Array** %461, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %462, i32 -1) + %463 = getelementptr inbounds { %Array*, %Array* }, { %Array*, 
%Array* }* %460, i32 0, i32 1 + %464 = load %Array*, %Array** %463, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %464, i32 -1) + %465 = bitcast { %Array*, %Array* }* %460 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %465, i32 -1) + br label %exiting__30 + +exiting__30: ; preds = %body__30 + %466 = add i64 %456, 1 + br label %header__30 + +exit__30: ; preds = %header__30 + call void @__quantum__rt__array_update_alias_count(%Array* %227, i32 -1) + %467 = sub i64 %243, 1 + br label %header__31 + +header__31: ; preds = %exiting__31, %exit__30 + %468 = phi i64 [ 0, %exit__30 ], [ %478, %exiting__31 ] + %469 = icmp sle i64 %468, %467 + br i1 %469, label %body__31, label %exit__31 + +body__31: ; preds = %header__31 + %470 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %242, i64 %468) + %471 = bitcast i8* %470 to { %Array*, %Array* }** + %472 = load { %Array*, %Array* }*, { %Array*, %Array* }** %471, align 8 + %473 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %472, i32 0, i32 0 + %474 = load %Array*, %Array** %473, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %474, i32 -1) + %475 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %472, i32 0, i32 1 + %476 = load %Array*, %Array** %475, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %476, i32 -1) + %477 = bitcast { %Array*, %Array* }* %472 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %477, i32 -1) + br label %exiting__31 + +exiting__31: ; preds = %body__31 + %478 = add i64 %468, 1 + br label %header__31 + +exit__31: ; preds = %header__31 + call void @__quantum__rt__array_update_alias_count(%Array* %242, i32 -1) + %479 = sub i64 %258, 1 + br label %header__32 + +header__32: ; preds = %exiting__32, %exit__31 + %480 = phi i64 [ 0, %exit__31 ], [ %490, %exiting__32 ] + %481 = icmp sle i64 %480, %479 + br i1 %481, label %body__32, label %exit__32 + +body__32: ; preds = %header__32 + %482 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %257, i64 %480) + %483 = bitcast i8* %482 to { %Array*, %Array* }** + %484 = load { %Array*, %Array* }*, { %Array*, %Array* }** %483, align 8 + %485 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %484, i32 0, i32 0 + %486 = load %Array*, %Array** %485, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %486, i32 -1) + %487 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %484, i32 0, i32 1 + %488 = load %Array*, %Array** %487, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %488, i32 -1) + %489 = bitcast { %Array*, %Array* }* %484 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %489, i32 -1) + br label %exiting__32 + +exiting__32: ; preds = %body__32 + %490 = add i64 %480, 1 + br label %header__32 + +exit__32: ; preds = %header__32 + call void @__quantum__rt__array_update_alias_count(%Array* %257, i32 -1) + %491 = sub i64 %273, 1 + br label %header__33 + +header__33: ; preds = %exiting__33, %exit__32 + %492 = phi i64 [ 0, %exit__32 ], [ %502, %exiting__33 ] + %493 = icmp sle i64 %492, %491 + br i1 %493, label %body__33, label %exit__33 + +body__33: ; preds = %header__33 + %494 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %272, i64 %492) + %495 = bitcast i8* %494 to { %Array*, %Array* }** + %496 = load { %Array*, %Array* }*, { %Array*, %Array* }** %495, align 8 + %497 = getelementptr inbounds { %Array*, %Array* }, { 
%Array*, %Array* }* %496, i32 0, i32 0 + %498 = load %Array*, %Array** %497, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %498, i32 -1) + %499 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %496, i32 0, i32 1 + %500 = load %Array*, %Array** %499, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %500, i32 -1) + %501 = bitcast { %Array*, %Array* }* %496 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %501, i32 -1) + br label %exiting__33 + +exiting__33: ; preds = %body__33 + %502 = add i64 %492, 1 + br label %header__33 + +exit__33: ; preds = %header__33 + call void @__quantum__rt__array_update_alias_count(%Array* %272, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %286, i32 -1) + %503 = sub i64 %291, 1 + br label %header__34 + +header__34: ; preds = %exiting__34, %exit__33 + %504 = phi i64 [ 0, %exit__33 ], [ %515, %exiting__34 ] + %505 = icmp sle i64 %504, %503 + br i1 %505, label %body__34, label %exit__34 + +body__34: ; preds = %header__34 + %506 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %290, i64 %504) + %507 = bitcast i8* %506 to { { double, double }*, %Array* }** + %508 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %507, align 8 + %509 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %508, i32 0, i32 0 + %510 = load { double, double }*, { double, double }** %509, align 8 + %511 = bitcast { double, double }* %510 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %511, i32 -1) + %512 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %508, i32 0, i32 1 + %513 = load %Array*, %Array** %512, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %513, i32 -1) + %514 = bitcast { { double, double }*, %Array* }* %508 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %514, i32 -1) + br label %exiting__34 + +exiting__34: ; preds = %body__34 + %515 = add i64 %504, 1 + br label %header__34 + +exit__34: ; preds = %header__34 + call void @__quantum__rt__array_update_alias_count(%Array* %290, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %305, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %306, i32 -1) + br label %header__35 + +header__35: ; preds = %exiting__35, %exit__34 + %516 = phi i64 [ 0, %exit__34 ], [ %527, %exiting__35 ] + %517 = icmp sle i64 %516, 3 + br i1 %517, label %body__35, label %exit__35 + +body__35: ; preds = %header__35 + %518 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 %516) + %519 = bitcast i8* %518 to { { double, double }*, %Array* }** + %520 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %519, align 8 + %521 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %520, i32 0, i32 0 + %522 = load { double, double }*, { double, double }** %521, align 8 + %523 = bitcast { double, double }* %522 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %523, i32 -1) + %524 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %520, i32 0, i32 1 + %525 = load %Array*, %Array** %524, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %525, i32 -1) + %526 = bitcast { { double, double }*, %Array* }* %520 to %Tuple* + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %526, i32 -1) + br label %exiting__35 + +exiting__35: ; preds = %body__35 + %527 = add i64 %516, 1 + br label %header__35 + +exit__35: ; preds = %header__35 + call void @__quantum__rt__array_update_reference_count(%Array* %200, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %209, i32 -1) + %528 = sub i64 %228, 1 + br label %header__36 + +header__36: ; preds = %exiting__36, %exit__35 + %529 = phi i64 [ 0, %exit__35 ], [ %539, %exiting__36 ] + %530 = icmp sle i64 %529, %528 + br i1 %530, label %body__36, label %exit__36 + +body__36: ; preds = %header__36 + %531 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %227, i64 %529) + %532 = bitcast i8* %531 to { %Array*, %Array* }** + %533 = load { %Array*, %Array* }*, { %Array*, %Array* }** %532, align 8 + %534 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %533, i32 0, i32 0 + %535 = load %Array*, %Array** %534, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %535, i32 -1) + %536 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %533, i32 0, i32 1 + %537 = load %Array*, %Array** %536, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %537, i32 -1) + %538 = bitcast { %Array*, %Array* }* %533 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %538, i32 -1) + br label %exiting__36 + +exiting__36: ; preds = %body__36 + %539 = add i64 %529, 1 + br label %header__36 + +exit__36: ; preds = %header__36 + call void @__quantum__rt__array_update_reference_count(%Array* %227, i32 -1) + %540 = sub i64 %243, 1 + br label %header__37 + +header__37: ; preds = %exiting__37, %exit__36 + %541 = phi i64 [ 0, %exit__36 ], [ %551, %exiting__37 ] + %542 = icmp sle i64 %541, %540 + br i1 %542, label %body__37, label %exit__37 + +body__37: ; preds = %header__37 + %543 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %242, i64 %541) + %544 = bitcast i8* %543 to { %Array*, %Array* }** + %545 = load { %Array*, %Array* }*, { %Array*, %Array* }** %544, align 8 + %546 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %545, i32 0, i32 0 + %547 = load %Array*, %Array** %546, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %547, i32 -1) + %548 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %545, i32 0, i32 1 + %549 = load %Array*, %Array** %548, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %549, i32 -1) + %550 = bitcast { %Array*, %Array* }* %545 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %550, i32 -1) + br label %exiting__37 + +exiting__37: ; preds = %body__37 + %551 = add i64 %541, 1 + br label %header__37 + +exit__37: ; preds = %header__37 + call void @__quantum__rt__array_update_reference_count(%Array* %242, i32 -1) + %552 = sub i64 %258, 1 + br label %header__38 + +header__38: ; preds = %exiting__38, %exit__37 + %553 = phi i64 [ 0, %exit__37 ], [ %563, %exiting__38 ] + %554 = icmp sle i64 %553, %552 + br i1 %554, label %body__38, label %exit__38 + +body__38: ; preds = %header__38 + %555 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %257, i64 %553) + %556 = bitcast i8* %555 to { %Array*, %Array* }** + %557 = load { %Array*, %Array* }*, { %Array*, %Array* }** %556, align 8 + %558 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %557, i32 0, i32 0 + %559 = load %Array*, %Array** %558, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %559, i32 -1) + %560 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %557, i32 0, i32 1 + %561 = load %Array*, %Array** %560, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %561, i32 -1) + %562 = bitcast { %Array*, %Array* }* %557 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %562, i32 -1) + br label %exiting__38 + +exiting__38: ; preds = %body__38 + %563 = add i64 %553, 1 + br label %header__38 + +exit__38: ; preds = %header__38 + call void @__quantum__rt__array_update_reference_count(%Array* %257, i32 -1) + %564 = sub i64 %273, 1 + br label %header__39 + +header__39: ; preds = %exiting__39, %exit__38 + %565 = phi i64 [ 0, %exit__38 ], [ %575, %exiting__39 ] + %566 = icmp sle i64 %565, %564 + br i1 %566, label %body__39, label %exit__39 + +body__39: ; preds = %header__39 + %567 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %272, i64 %565) + %568 = bitcast i8* %567 to { %Array*, %Array* }** + %569 = load { %Array*, %Array* }*, { %Array*, %Array* }** %568, align 8 + %570 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %569, i32 0, i32 0 + %571 = load %Array*, %Array** %570, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %571, i32 -1) + %572 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %569, i32 0, i32 1 + %573 = load %Array*, %Array** %572, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %573, i32 -1) + %574 = bitcast { %Array*, %Array* }* %569 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %574, i32 -1) + br label %exiting__39 + +exiting__39: ; preds = %body__39 + %575 = add i64 %565, 1 + br label %header__39 + +exit__39: ; preds = %header__39 + call void @__quantum__rt__array_update_reference_count(%Array* %272, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %286, i32 -1) + %576 = sub i64 %291, 1 + br label %header__40 + +header__40: ; preds = %exiting__40, %exit__39 + %577 = phi i64 [ 0, %exit__39 ], [ %588, %exiting__40 ] + %578 = icmp sle i64 %577, %576 + br i1 %578, label %body__40, label %exit__40 + +body__40: ; preds = %header__40 + %579 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %290, i64 %577) + %580 = bitcast i8* %579 to { { double, double }*, %Array* }** + %581 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %580, align 8 + %582 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %581, i32 0, i32 0 + %583 = load { double, double }*, { double, double }** %582, align 8 + %584 = bitcast { double, double }* %583 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %584, i32 -1) + %585 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %581, i32 0, i32 1 + %586 = load %Array*, %Array** %585, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %586, i32 -1) + %587 = bitcast { { double, double }*, %Array* }* %581 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %587, i32 -1) + br label %exiting__40 + +exiting__40: ; preds = %body__40 + %588 = add i64 %577, 1 + br label %header__40 + +exit__40: ; preds = %header__40 + call void @__quantum__rt__array_update_reference_count(%Array* %290, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %305, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %306, i32 -1) + ret double %307 +} + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) + +declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) + +define internal { { double, double }*, %Array* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerInputState__body({ double, double }* %0, %Array* %__Item3__) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__Item3__, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }*, %Array* }* getelementptr ({ { double, double }*, %Array* }, { { double, double }*, %Array* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { { double, double }*, %Array* }* + %3 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %2, i32 0, i32 1 + store { double, double }* %0, { double, double }** %3, align 8 + store %Array* %__Item3__, %Array** %4, align 8 + %5 = bitcast { double, double }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__Item3__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__Item3__, i32 -1) + ret { { double, double }*, %Array* }* %2 +} + +declare %Tuple* @__quantum__rt__tuple_create(i64) + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) + +define internal { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerEncodingData__body(i64 %__Item1__, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, { i64, %Array* }* %0, double %__Item5__) { +entry: + %1 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %2) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %5) + %8 = bitcast i8* %7 to { %Array*, %Array* }** + %9 = load { %Array*, %Array* }*, { %Array*, %Array* }** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array*, %Array* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %16 = 
getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call i64 @__quantum__rt__array_get_size_1d(%Array* %17) + %19 = sub i64 %18, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %20) + %23 = bitcast i8* %22 to { %Array*, %Array* }** + %24 = load { %Array*, %Array* }*, { %Array*, %Array* }** %23, align 8 + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { %Array*, %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %31 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 2 + %32 = load %Array*, %Array** %31, align 8 + %33 = call i64 @__quantum__rt__array_get_size_1d(%Array* %32) + %34 = sub i64 %33, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %45, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %35) + %38 = bitcast i8* %37 to { %Array*, %Array* }** + %39 = load { %Array*, %Array* }*, { %Array*, %Array* }** %38, align 8 + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 0 + %41 = load %Array*, %Array** %40, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1) + %42 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 1 + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + %44 = bitcast { %Array*, %Array* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %44, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %45 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %46 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 3 + %47 = load %Array*, %Array** %46, align 8 + %48 = call i64 @__quantum__rt__array_get_size_1d(%Array* %47) + %49 = sub i64 %48, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %50 = phi i64 [ 0, %exit__3 ], [ %60, %exiting__4 ] + %51 = icmp sle i64 %50, %49 + br i1 %51, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %50) + %53 = bitcast i8* %52 to { %Array*, %Array* }** + %54 = load { %Array*, %Array* 
}*, { %Array*, %Array* }** %53, align 8 + %55 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 0 + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + %57 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 1 + %58 = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 1) + %59 = bitcast { %Array*, %Array* }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %59, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %60 = add i64 %50, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + %61 = bitcast { %Array*, %Array*, %Array*, %Array* }* %__Item2__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %62 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* getelementptr ({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* null, i32 1) to i64)) + %63 = bitcast %Tuple* %62 to { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* + %64 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %63, i32 0, i32 0 + %65 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %63, i32 0, i32 1 + %66 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %63, i32 0, i32 2 + %67 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %63, i32 0, i32 3 + store i64 %__Item1__, i64* %64, align 4 + store { %Array*, %Array*, %Array*, %Array* }* %__Item2__, { %Array*, %Array*, %Array*, %Array* }** %65, align 8 + store { i64, %Array* }* %0, { i64, %Array* }** %66, align 8 + store double %__Item5__, double* %67, align 8 + %68 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 0 + %69 = load %Array*, %Array** %68, align 8 + %70 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 1 + %71 = load %Array*, %Array** %70, align 8 + %72 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 2 + %73 = load %Array*, %Array** %72, align 8 + %74 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 3 + %75 = load %Array*, %Array** %74, align 8 + %76 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %0, i32 0, i32 1 + %77 = load %Array*, %Array** %76, align 8 + %78 = call i64 @__quantum__rt__array_get_size_1d(%Array* %69) + %79 = sub i64 %78, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %80 = phi i64 [ 0, %exit__4 ], [ %90, %exiting__5 ] + %81 = icmp sle i64 %80, %79 + br 
i1 %81, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 %80) + %83 = bitcast i8* %82 to { %Array*, %Array* }** + %84 = load { %Array*, %Array* }*, { %Array*, %Array* }** %83, align 8 + %85 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %84, i32 0, i32 0 + %86 = load %Array*, %Array** %85, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %86, i32 1) + %87 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %84, i32 0, i32 1 + %88 = load %Array*, %Array** %87, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %88, i32 1) + %89 = bitcast { %Array*, %Array* }* %84 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %89, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %90 = add i64 %80, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 1) + %91 = call i64 @__quantum__rt__array_get_size_1d(%Array* %71) + %92 = sub i64 %91, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %93 = phi i64 [ 0, %exit__5 ], [ %103, %exiting__6 ] + %94 = icmp sle i64 %93, %92 + br i1 %94, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %95 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %71, i64 %93) + %96 = bitcast i8* %95 to { %Array*, %Array* }** + %97 = load { %Array*, %Array* }*, { %Array*, %Array* }** %96, align 8 + %98 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %97, i32 0, i32 0 + %99 = load %Array*, %Array** %98, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %99, i32 1) + %100 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %97, i32 0, i32 1 + %101 = load %Array*, %Array** %100, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %101, i32 1) + %102 = bitcast { %Array*, %Array* }* %97 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %102, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %103 = add i64 %93, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %71, i32 1) + %104 = call i64 @__quantum__rt__array_get_size_1d(%Array* %73) + %105 = sub i64 %104, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %106 = phi i64 [ 0, %exit__6 ], [ %116, %exiting__7 ] + %107 = icmp sle i64 %106, %105 + br i1 %107, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 %106) + %109 = bitcast i8* %108 to { %Array*, %Array* }** + %110 = load { %Array*, %Array* }*, { %Array*, %Array* }** %109, align 8 + %111 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %110, i32 0, i32 0 + %112 = load %Array*, %Array** %111, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %112, i32 1) + %113 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %110, i32 0, i32 1 + %114 = load %Array*, %Array** %113, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %114, i32 1) + %115 = bitcast { %Array*, %Array* }* %110 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %115, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %116 = add i64 %106, 1 + br label %header__7 + 
+exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %73, i32 1) + %117 = call i64 @__quantum__rt__array_get_size_1d(%Array* %75) + %118 = sub i64 %117, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %119 = phi i64 [ 0, %exit__7 ], [ %129, %exiting__8 ] + %120 = icmp sle i64 %119, %118 + br i1 %120, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %75, i64 %119) + %122 = bitcast i8* %121 to { %Array*, %Array* }** + %123 = load { %Array*, %Array* }*, { %Array*, %Array* }** %122, align 8 + %124 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 0 + %125 = load %Array*, %Array** %124, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %125, i32 1) + %126 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 1 + %127 = load %Array*, %Array** %126, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %127, i32 1) + %128 = bitcast { %Array*, %Array* }* %123 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %128, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %129 = add i64 %119, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %75, i32 1) + %130 = bitcast { %Array*, %Array*, %Array*, %Array* }* %__Item2__ to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %130, i32 1) + %131 = call i64 @__quantum__rt__array_get_size_1d(%Array* %77) + %132 = sub i64 %131, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %133 = phi i64 [ 0, %exit__8 ], [ %144, %exiting__9 ] + %134 = icmp sle i64 %133, %132 + br i1 %134, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %135 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 %133) + %136 = bitcast i8* %135 to { { double, double }*, %Array* }** + %137 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %136, align 8 + %138 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %137, i32 0, i32 0 + %139 = load { double, double }*, { double, double }** %138, align 8 + %140 = bitcast { double, double }* %139 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %140, i32 1) + %141 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %137, i32 0, i32 1 + %142 = load %Array*, %Array** %141, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %142, i32 1) + %143 = bitcast { { double, double }*, %Array* }* %137 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %143, i32 1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %144 = add i64 %133, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_reference_count(%Array* %77, i32 1) + %145 = bitcast { i64, %Array* }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %145, i32 1) + %146 = sub i64 %3, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %147 = phi i64 [ 0, %exit__9 ], [ %157, %exiting__10 ] + %148 = icmp sle i64 %147, %146 + br i1 %148, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %149 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %147) + %150 
= bitcast i8* %149 to { %Array*, %Array* }** + %151 = load { %Array*, %Array* }*, { %Array*, %Array* }** %150, align 8 + %152 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %151, i32 0, i32 0 + %153 = load %Array*, %Array** %152, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %153, i32 -1) + %154 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %151, i32 0, i32 1 + %155 = load %Array*, %Array** %154, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %155, i32 -1) + %156 = bitcast { %Array*, %Array* }* %151 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %156, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %157 = add i64 %147, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %158 = sub i64 %18, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %159 = phi i64 [ 0, %exit__10 ], [ %169, %exiting__11 ] + %160 = icmp sle i64 %159, %158 + br i1 %160, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %161 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %159) + %162 = bitcast i8* %161 to { %Array*, %Array* }** + %163 = load { %Array*, %Array* }*, { %Array*, %Array* }** %162, align 8 + %164 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %163, i32 0, i32 0 + %165 = load %Array*, %Array** %164, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %165, i32 -1) + %166 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %163, i32 0, i32 1 + %167 = load %Array*, %Array** %166, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %167, i32 -1) + %168 = bitcast { %Array*, %Array* }* %163 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %168, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %169 = add i64 %159, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %170 = sub i64 %33, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %171 = phi i64 [ 0, %exit__11 ], [ %181, %exiting__12 ] + %172 = icmp sle i64 %171, %170 + br i1 %172, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %173 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %171) + %174 = bitcast i8* %173 to { %Array*, %Array* }** + %175 = load { %Array*, %Array* }*, { %Array*, %Array* }** %174, align 8 + %176 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %175, i32 0, i32 0 + %177 = load %Array*, %Array** %176, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %177, i32 -1) + %178 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %175, i32 0, i32 1 + %179 = load %Array*, %Array** %178, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %179, i32 -1) + %180 = bitcast { %Array*, %Array* }* %175 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %180, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %181 = add i64 %171, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %182 = sub i64 %48, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %183 = phi i64 [ 0, %exit__12 ], [ %193, 
%exiting__13 ] + %184 = icmp sle i64 %183, %182 + br i1 %184, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %185 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %183) + %186 = bitcast i8* %185 to { %Array*, %Array* }** + %187 = load { %Array*, %Array* }*, { %Array*, %Array* }** %186, align 8 + %188 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %187, i32 0, i32 0 + %189 = load %Array*, %Array** %188, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %189, i32 -1) + %190 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %187, i32 0, i32 1 + %191 = load %Array*, %Array** %190, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %191, i32 -1) + %192 = bitcast { %Array*, %Array* }* %187 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %192, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %193 = add i64 %183, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + ret { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %63 +} + +define internal double @Microsoft__Quantum__Chemistry__JordanWigner__VQE__EstimateEnergy__body({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %jwHamiltonian, i64 %nSamples) { +entry: + %energy = alloca double, align 8 + %0 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %jwHamiltonian, i32 0, i32 1 + %jwTerms = load { %Array*, %Array*, %Array*, %Array* }*, { %Array*, %Array*, %Array*, %Array* }** %0, align 8 + %1 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %jwTerms, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %2) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %5) + %8 = bitcast i8* %7 to { %Array*, %Array* }** + %9 = load { %Array*, %Array* }*, { %Array*, %Array* }** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array*, %Array* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %jwTerms, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call i64 @__quantum__rt__array_get_size_1d(%Array* %17) + %19 = sub i64 %18, 1 + br label 
%header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %20) + %23 = bitcast i8* %22 to { %Array*, %Array* }** + %24 = load { %Array*, %Array* }*, { %Array*, %Array* }** %23, align 8 + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { %Array*, %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %31 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %jwTerms, i32 0, i32 2 + %32 = load %Array*, %Array** %31, align 8 + %33 = call i64 @__quantum__rt__array_get_size_1d(%Array* %32) + %34 = sub i64 %33, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %45, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %35) + %38 = bitcast i8* %37 to { %Array*, %Array* }** + %39 = load { %Array*, %Array* }*, { %Array*, %Array* }** %38, align 8 + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 0 + %41 = load %Array*, %Array** %40, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1) + %42 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 1 + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + %44 = bitcast { %Array*, %Array* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %44, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %45 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %46 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %jwTerms, i32 0, i32 3 + %47 = load %Array*, %Array** %46, align 8 + %48 = call i64 @__quantum__rt__array_get_size_1d(%Array* %47) + %49 = sub i64 %48, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %50 = phi i64 [ 0, %exit__3 ], [ %60, %exiting__4 ] + %51 = icmp sle i64 %50, %49 + br i1 %51, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %50) + %53 = bitcast i8* %52 to { %Array*, %Array* }** + %54 = load { %Array*, %Array* }*, { %Array*, %Array* }** %53, align 8 + %55 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 0 + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + %57 = getelementptr 
inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 1 + %58 = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 1) + %59 = bitcast { %Array*, %Array* }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %59, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %60 = add i64 %50, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + %61 = bitcast { %Array*, %Array*, %Array*, %Array* }* %jwTerms to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %62 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %jwHamiltonian, i32 0, i32 2 + %inputState = load { i64, %Array* }*, { i64, %Array* }** %62, align 8 + %63 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputState, i32 0, i32 1 + %64 = load %Array*, %Array** %63, align 8 + %65 = call i64 @__quantum__rt__array_get_size_1d(%Array* %64) + %66 = sub i64 %65, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %67 = phi i64 [ 0, %exit__4 ], [ %78, %exiting__5 ] + %68 = icmp sle i64 %67, %66 + br i1 %68, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %67) + %70 = bitcast i8* %69 to { { double, double }*, %Array* }** + %71 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %70, align 8 + %72 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %71, i32 0, i32 0 + %73 = load { double, double }*, { double, double }** %72, align 8 + %74 = bitcast { double, double }* %73 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %74, i32 1) + %75 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %71, i32 0, i32 1 + %76 = load %Array*, %Array** %75, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 1) + %77 = bitcast { { double, double }*, %Array* }* %71 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %77, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %78 = add i64 %67, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + %79 = bitcast { i64, %Array* }* %inputState to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + %80 = bitcast { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %jwHamiltonian to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 1) + store double 0.000000e+00, double* %energy, align 8 + %81 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %jwHamiltonian, i32 0, i32 0 + %nQubits = load i64, i64* %81, align 4 + %82 = sub i64 %3, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %83 = phi i64 [ 0, %exit__5 ], [ %93, %exiting__6 ] + %84 = icmp sle i64 %83, %82 + br i1 %84, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %83) + %86 = bitcast i8* %85 to { %Array*, %Array* }** + %87 
= load { %Array*, %Array* }*, { %Array*, %Array* }** %86, align 8 + %88 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %87, i32 0, i32 0 + %89 = load %Array*, %Array** %88, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %89, i32 1) + %90 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %87, i32 0, i32 1 + %91 = load %Array*, %Array** %90, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %91, i32 1) + %92 = bitcast { %Array*, %Array* }* %87 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %92, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %93 = add i64 %83, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %94 = sub i64 %18, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %95 = phi i64 [ 0, %exit__6 ], [ %105, %exiting__7 ] + %96 = icmp sle i64 %95, %94 + br i1 %96, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %95) + %98 = bitcast i8* %97 to { %Array*, %Array* }** + %99 = load { %Array*, %Array* }*, { %Array*, %Array* }** %98, align 8 + %100 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %99, i32 0, i32 0 + %101 = load %Array*, %Array** %100, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %101, i32 1) + %102 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %99, i32 0, i32 1 + %103 = load %Array*, %Array** %102, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %103, i32 1) + %104 = bitcast { %Array*, %Array* }* %99 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %104, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %105 = add i64 %95, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %106 = sub i64 %33, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %107 = phi i64 [ 0, %exit__7 ], [ %117, %exiting__8 ] + %108 = icmp sle i64 %107, %106 + br i1 %108, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %107) + %110 = bitcast i8* %109 to { %Array*, %Array* }** + %111 = load { %Array*, %Array* }*, { %Array*, %Array* }** %110, align 8 + %112 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %111, i32 0, i32 0 + %113 = load %Array*, %Array** %112, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %113, i32 1) + %114 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %111, i32 0, i32 1 + %115 = load %Array*, %Array** %114, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %115, i32 1) + %116 = bitcast { %Array*, %Array* }* %111 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %116, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %117 = add i64 %107, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %118 = sub i64 %48, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %119 = phi i64 [ 0, %exit__8 ], [ %129, %exiting__9 ] + %120 = icmp sle i64 %119, %118 + br i1 %120, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %121 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %119) + %122 = bitcast i8* %121 to { %Array*, %Array* }** + %123 = load { %Array*, %Array* }*, { %Array*, %Array* }** %122, align 8 + %124 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 0 + %125 = load %Array*, %Array** %124, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %125, i32 1) + %126 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 1 + %127 = load %Array*, %Array** %126, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %127, i32 1) + %128 = bitcast { %Array*, %Array* }* %123 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %128, i32 1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %129 = add i64 %119, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %130 = sub i64 %65, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %131 = phi i64 [ 0, %exit__9 ], [ %142, %exiting__10 ] + %132 = icmp sle i64 %131, %130 + br i1 %132, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %131) + %134 = bitcast i8* %133 to { { double, double }*, %Array* }** + %135 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %134, align 8 + %136 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %135, i32 0, i32 0 + %137 = load { double, double }*, { double, double }** %136, align 8 + %138 = bitcast { double, double }* %137 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %138, i32 1) + %139 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %135, i32 0, i32 1 + %140 = load %Array*, %Array** %139, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %140, i32 1) + %141 = bitcast { { double, double }*, %Array* }* %135 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %141, i32 1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %142 = add i64 %131, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + %143 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %jwHamiltonian, i32 0, i32 3 + %energyOffset = load double, double* %143, align 8 + %144 = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerGeneratorSystem__body({ %Array*, %Array*, %Array*, %Array* }* %jwTerms) + %145 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %144, i32 0, i32 0 + %nTerms = load i64, i64* %145, align 4 + %146 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %144, i32 0, i32 1 + %indexFunction = load %Callable*, %Callable** %146, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %indexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %indexFunction, i32 1) + %147 = sub i64 %nTerms, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %idxTerm = phi i64 [ 0, 
%exit__10 ], [ %166, %exiting__11 ] + %148 = icmp sle i64 %idxTerm, %147 + br i1 %148, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %149 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %150 = bitcast %Tuple* %149 to { i64 }* + %151 = getelementptr inbounds { i64 }, { i64 }* %150, i32 0, i32 0 + store i64 %idxTerm, i64* %151, align 4 + %152 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %indexFunction, %Tuple* %149, %Tuple* %152) + %153 = bitcast %Tuple* %152 to { { { %Array*, %Array* }*, %Array* }* }* + %154 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %153, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %154, align 8 + %155 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %156 = load { %Array*, %Array* }*, { %Array*, %Array* }** %155, align 8 + %157 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %156, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %157, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %158 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %156, i32 0, i32 1 + %coeff = load %Array*, %Array** %158, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %159 = bitcast { %Array*, %Array* }* %156 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %159, i32 1) + %160 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %160, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %161 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %161, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %162 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxTermType, i64 0) + %163 = bitcast i8* %162 to i64* + %termType = load i64, i64* %163, align 4 + %ops = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner__VQE__MeasurementOperators__body(i64 %nQubits, %Array* %idxFermions, i64 %termType) + %164 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ops) + %165 = sub i64 %164, 1 + br label %header__12 + +exiting__11: ; preds = %exit__15 + %166 = add i64 %idxTerm, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + %167 = load double, double* %energy, align 8 + %168 = fadd double %167, %energyOffset + %169 = sub i64 %3, 1 + br label %header__16 + +header__12: ; preds = %exiting__12, %body__11 + %170 = phi i64 [ 0, %body__11 ], [ %175, %exiting__12 ] + %171 = icmp sle i64 %170, %165 + br i1 %171, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %172 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %170) + %173 = bitcast i8* %172 to %Array** + %174 = load %Array*, %Array** 
%173, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %174, i32 1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %175 = add i64 %170, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %coeffs = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner__VQE__ExpandedCoefficients__body(%Array* %coeff, i64 %termType) + call void @__quantum__rt__array_update_alias_count(%Array* %coeffs, i32 1) + %176 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef4___PrepareTrialState____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %177 = load %Array*, %Array** %63, align 8 + %178 = call i64 @__quantum__rt__array_get_size_1d(%Array* %177) + %179 = sub i64 %178, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %180 = phi i64 [ 0, %exit__12 ], [ %191, %exiting__13 ] + %181 = icmp sle i64 %180, %179 + br i1 %181, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %182 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %177, i64 %180) + %183 = bitcast i8* %182 to { { double, double }*, %Array* }** + %184 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %183, align 8 + %185 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %184, i32 0, i32 0 + %186 = load { double, double }*, { double, double }** %185, align 8 + %187 = bitcast { double, double }* %186 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %187, i32 1) + %188 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %184, i32 0, i32 1 + %189 = load %Array*, %Array** %188, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %189, i32 1) + %190 = bitcast { { double, double }*, %Array* }* %184 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %190, i32 1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %191 = add i64 %180, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_reference_count(%Array* %177, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %79, i32 1) + %192 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { i64, %Array* }* }* getelementptr ({ %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* null, i32 1) to i64)) + %193 = bitcast %Tuple* %192 to { %Callable*, { i64, %Array* }* }* + %194 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %193, i32 0, i32 0 + %195 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %193, i32 0, i32 1 + store %Callable* %176, %Callable** %194, align 8 + store { i64, %Array* }* %inputState, { i64, %Array* }** %195, align 8 + %inputStateUnitary = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__41__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__26__FunctionTable, %Tuple* %192) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inputStateUnitary, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inputStateUnitary, i32 1) + %jwTermEnergy = call double 
@Microsoft__Quantum__Chemistry__JordanWigner__VQE__EstimateTermExpectation__body(%Callable* %inputStateUnitary, %Array* %ops, %Array* %coeffs, i64 %nQubits, i64 %nSamples) + %196 = load double, double* %energy, align 8 + %197 = fadd double %196, %jwTermEnergy + store double %197, double* %energy, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %159, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %161, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + %198 = sub i64 %164, 1 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %199 = phi i64 [ 0, %exit__13 ], [ %204, %exiting__14 ] + %200 = icmp sle i64 %199, %198 + br i1 %200, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %201 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %199) + %202 = bitcast i8* %201 to %Array** + %203 = load %Array*, %Array** %202, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %203, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %204 = add i64 %199, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeffs, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inputStateUnitary, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inputStateUnitary, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %149, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %159, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %161, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %152, i32 -1) + %205 = sub i64 %164, 1 + br label %header__15 + +header__15: ; preds = %exiting__15, %exit__14 + %206 = phi i64 [ 0, %exit__14 ], [ %211, %exiting__15 ] + %207 = icmp sle i64 %206, %205 + br i1 %207, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %208 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %206) + %209 = bitcast i8* %208 to %Array** + %210 = load %Array*, %Array** %209, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %210, i32 -1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %211 = add i64 %206, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coeffs, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %inputStateUnitary, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %inputStateUnitary, i32 -1) + br label %exiting__11 + 
+header__16: ; preds = %exiting__16, %exit__11 + %212 = phi i64 [ 0, %exit__11 ], [ %222, %exiting__16 ] + %213 = icmp sle i64 %212, %169 + br i1 %213, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %212) + %215 = bitcast i8* %214 to { %Array*, %Array* }** + %216 = load { %Array*, %Array* }*, { %Array*, %Array* }** %215, align 8 + %217 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %216, i32 0, i32 0 + %218 = load %Array*, %Array** %217, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %218, i32 -1) + %219 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %216, i32 0, i32 1 + %220 = load %Array*, %Array** %219, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %220, i32 -1) + %221 = bitcast { %Array*, %Array* }* %216 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %221, i32 -1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %222 = add i64 %212, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %223 = sub i64 %18, 1 + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %224 = phi i64 [ 0, %exit__16 ], [ %234, %exiting__17 ] + %225 = icmp sle i64 %224, %223 + br i1 %225, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %226 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %224) + %227 = bitcast i8* %226 to { %Array*, %Array* }** + %228 = load { %Array*, %Array* }*, { %Array*, %Array* }** %227, align 8 + %229 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %228, i32 0, i32 0 + %230 = load %Array*, %Array** %229, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %230, i32 -1) + %231 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %228, i32 0, i32 1 + %232 = load %Array*, %Array** %231, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %232, i32 -1) + %233 = bitcast { %Array*, %Array* }* %228 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %233, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %234 = add i64 %224, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %235 = sub i64 %33, 1 + br label %header__18 + +header__18: ; preds = %exiting__18, %exit__17 + %236 = phi i64 [ 0, %exit__17 ], [ %246, %exiting__18 ] + %237 = icmp sle i64 %236, %235 + br i1 %237, label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %238 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %236) + %239 = bitcast i8* %238 to { %Array*, %Array* }** + %240 = load { %Array*, %Array* }*, { %Array*, %Array* }** %239, align 8 + %241 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %240, i32 0, i32 0 + %242 = load %Array*, %Array** %241, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %242, i32 -1) + %243 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %240, i32 0, i32 1 + %244 = load %Array*, %Array** %243, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %244, i32 -1) + %245 = bitcast { %Array*, %Array* }* %240 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %245, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = 
%body__18 + %246 = add i64 %236, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %247 = sub i64 %48, 1 + br label %header__19 + +header__19: ; preds = %exiting__19, %exit__18 + %248 = phi i64 [ 0, %exit__18 ], [ %258, %exiting__19 ] + %249 = icmp sle i64 %248, %247 + br i1 %249, label %body__19, label %exit__19 + +body__19: ; preds = %header__19 + %250 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %248) + %251 = bitcast i8* %250 to { %Array*, %Array* }** + %252 = load { %Array*, %Array* }*, { %Array*, %Array* }** %251, align 8 + %253 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %252, i32 0, i32 0 + %254 = load %Array*, %Array** %253, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %254, i32 -1) + %255 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %252, i32 0, i32 1 + %256 = load %Array*, %Array** %255, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %256, i32 -1) + %257 = bitcast { %Array*, %Array* }* %252 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %257, i32 -1) + br label %exiting__19 + +exiting__19: ; preds = %body__19 + %258 = add i64 %248, 1 + br label %header__19 + +exit__19: ; preds = %header__19 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %259 = load %Array*, %Array** %63, align 8 + %260 = call i64 @__quantum__rt__array_get_size_1d(%Array* %259) + %261 = sub i64 %260, 1 + br label %header__20 + +header__20: ; preds = %exiting__20, %exit__19 + %262 = phi i64 [ 0, %exit__19 ], [ %273, %exiting__20 ] + %263 = icmp sle i64 %262, %261 + br i1 %263, label %body__20, label %exit__20 + +body__20: ; preds = %header__20 + %264 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %259, i64 %262) + %265 = bitcast i8* %264 to { { double, double }*, %Array* }** + %266 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %265, align 8 + %267 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %266, i32 0, i32 0 + %268 = load { double, double }*, { double, double }** %267, align 8 + %269 = bitcast { double, double }* %268 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %269, i32 -1) + %270 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %266, i32 0, i32 1 + %271 = load %Array*, %Array** %270, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %271, i32 -1) + %272 = bitcast { { double, double }*, %Array* }* %266 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %272, i32 -1) + br label %exiting__20 + +exiting__20: ; preds = %body__20 + %273 = add i64 %262, 1 + br label %header__20 + +exit__20: ; preds = %header__20 + call void @__quantum__rt__array_update_alias_count(%Array* %259, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 -1) + %274 = sub i64 %3, 1 + br label %header__21 + +header__21: ; preds = %exiting__21, %exit__20 + %275 = phi i64 [ 0, %exit__20 ], [ %285, %exiting__21 ] + %276 = icmp sle i64 %275, %274 + br i1 %276, label %body__21, label %exit__21 + +body__21: ; preds = %header__21 + %277 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %275) + %278 = bitcast 
i8* %277 to { %Array*, %Array* }** + %279 = load { %Array*, %Array* }*, { %Array*, %Array* }** %278, align 8 + %280 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %279, i32 0, i32 0 + %281 = load %Array*, %Array** %280, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %281, i32 -1) + %282 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %279, i32 0, i32 1 + %283 = load %Array*, %Array** %282, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %283, i32 -1) + %284 = bitcast { %Array*, %Array* }* %279 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %284, i32 -1) + br label %exiting__21 + +exiting__21: ; preds = %body__21 + %285 = add i64 %275, 1 + br label %header__21 + +exit__21: ; preds = %header__21 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %286 = sub i64 %18, 1 + br label %header__22 + +header__22: ; preds = %exiting__22, %exit__21 + %287 = phi i64 [ 0, %exit__21 ], [ %297, %exiting__22 ] + %288 = icmp sle i64 %287, %286 + br i1 %288, label %body__22, label %exit__22 + +body__22: ; preds = %header__22 + %289 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %287) + %290 = bitcast i8* %289 to { %Array*, %Array* }** + %291 = load { %Array*, %Array* }*, { %Array*, %Array* }** %290, align 8 + %292 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %291, i32 0, i32 0 + %293 = load %Array*, %Array** %292, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %293, i32 -1) + %294 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %291, i32 0, i32 1 + %295 = load %Array*, %Array** %294, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %295, i32 -1) + %296 = bitcast { %Array*, %Array* }* %291 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %296, i32 -1) + br label %exiting__22 + +exiting__22: ; preds = %body__22 + %297 = add i64 %287, 1 + br label %header__22 + +exit__22: ; preds = %header__22 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %298 = sub i64 %33, 1 + br label %header__23 + +header__23: ; preds = %exiting__23, %exit__22 + %299 = phi i64 [ 0, %exit__22 ], [ %309, %exiting__23 ] + %300 = icmp sle i64 %299, %298 + br i1 %300, label %body__23, label %exit__23 + +body__23: ; preds = %header__23 + %301 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %299) + %302 = bitcast i8* %301 to { %Array*, %Array* }** + %303 = load { %Array*, %Array* }*, { %Array*, %Array* }** %302, align 8 + %304 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %303, i32 0, i32 0 + %305 = load %Array*, %Array** %304, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %305, i32 -1) + %306 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %303, i32 0, i32 1 + %307 = load %Array*, %Array** %306, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %307, i32 -1) + %308 = bitcast { %Array*, %Array* }* %303 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %308, i32 -1) + br label %exiting__23 + +exiting__23: ; preds = %body__23 + %309 = add i64 %299, 1 + br label %header__23 + +exit__23: ; preds = %header__23 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %310 = sub i64 %48, 1 + br label %header__24 + +header__24: ; preds = %exiting__24, %exit__23 + %311 = phi i64 [ 0, %exit__23 ], [ %321, %exiting__24 ] + 
%312 = icmp sle i64 %311, %310 + br i1 %312, label %body__24, label %exit__24 + +body__24: ; preds = %header__24 + %313 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %311) + %314 = bitcast i8* %313 to { %Array*, %Array* }** + %315 = load { %Array*, %Array* }*, { %Array*, %Array* }** %314, align 8 + %316 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %315, i32 0, i32 0 + %317 = load %Array*, %Array** %316, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %317, i32 -1) + %318 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %315, i32 0, i32 1 + %319 = load %Array*, %Array** %318, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %319, i32 -1) + %320 = bitcast { %Array*, %Array* }* %315 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %320, i32 -1) + br label %exiting__24 + +exiting__24: ; preds = %body__24 + %321 = add i64 %311, 1 + br label %header__24 + +exit__24: ; preds = %header__24 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %322 = sub i64 %260, 1 + br label %header__25 + +header__25: ; preds = %exiting__25, %exit__24 + %323 = phi i64 [ 0, %exit__24 ], [ %334, %exiting__25 ] + %324 = icmp sle i64 %323, %322 + br i1 %324, label %body__25, label %exit__25 + +body__25: ; preds = %header__25 + %325 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %259, i64 %323) + %326 = bitcast i8* %325 to { { double, double }*, %Array* }** + %327 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %326, align 8 + %328 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %327, i32 0, i32 0 + %329 = load { double, double }*, { double, double }** %328, align 8 + %330 = bitcast { double, double }* %329 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %330, i32 -1) + %331 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %327, i32 0, i32 1 + %332 = load %Array*, %Array** %331, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %332, i32 -1) + %333 = bitcast { { double, double }*, %Array* }* %327 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %333, i32 -1) + br label %exiting__25 + +exiting__25: ; preds = %body__25 + %334 = add i64 %323, 1 + br label %header__25 + +exit__25: ; preds = %header__25 + call void @__quantum__rt__array_update_alias_count(%Array* %259, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %indexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %indexFunction, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %indexFunction, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %indexFunction, i32 -1) + %335 = bitcast { i64, %Callable* }* %144 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %335, i32 -1) + ret double %168 +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { +entry: + %__controlQubits__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__controlQubits__, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, 
%Qubit** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %control, %Qubit** %5, align 8 + %__controlQubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %__controlQubits__, %Array* %3) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__1, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* + %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 + store %Qubit* %control, %Qubit** %5, align 8 + store %Qubit* %target, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret 
void +} + +define internal void @Microsoft__Quantum__Intrinsic__Exp__body(%Array* %paulis, double %theta, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +declare void @__quantum__qis__exp__body(%Array*, double, %Array*) + +define internal void @Microsoft__Quantum__Intrinsic__Exp__adj(%Array* %paulis, double %theta, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +declare void @__quantum__qis__exp__adj(%Array*, double, %Array*) + +define internal void @Microsoft__Quantum__Intrinsic__Exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 0 + %paulis = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Array*, double, %Array* }* + %6 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 2 + store %Array* %paulis, %Array** %6, align 8 + store double %theta, double* %7, align 8 + store %Array* %qubits, %Array** %8, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +declare void 
@__quantum__qis__exp__ctl(%Array*, { %Array*, double, %Array* }*) + +define internal void @Microsoft__Quantum__Intrinsic__Exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 0 + %paulis = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Array*, double, %Array* }* + %6 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 2 + store %Array* %paulis, %Array** %6, align 8 + store double %theta, double* %7, align 8 + store %Array* %qubits, %Array** %8, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +declare void @__quantum__qis__exp__ctladj(%Array*, { %Array*, double, %Array* }*) + +define internal void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__h__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__h__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__h__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__h__ctl(%Array* %__controlQubits__, %Qubit* 
%qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { +entry: + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 -2, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %3 = bitcast i8* %2 to %Qubit** + store %Qubit* %qubit, %Qubit** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %4 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + ret %Result* %4 +} + +declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) + +define internal %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret %Result* %0 +} + +define internal void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %qubit) { +entry: + %0 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) + %1 = call %Result* @__quantum__rt__result_get_one() + %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %qubit) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +declare %Result* @__quantum__rt__result_get_one() + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) + +declare void @__quantum__qis__x__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__ResetAll__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %2) + %5 = bitcast i8* %4 to %Qubit** + %qubit = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %qubit) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void 
@__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__s__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__s__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__s__adj(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__s__adj(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__s__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__s__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__s__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__s__ctladj(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__y__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__y__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__y__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__y__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* 
%__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__z__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__z__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertAllZero__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %2) + %5 = bitcast i8* %4 to %Qubit** + %qubit = load %Qubit*, %Qubit** %5, align 8 + %6 = call %Result* @__quantum__rt__result_get_zero() + call void @Microsoft__Quantum__Diagnostics__AssertQubit__body(%Result* %6, %Qubit* %qubit) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertQubit__body(%Result* %expected, %Qubit* %q) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to i2* + store i2 -2, i2* %2, align 1 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %q, %Qubit** %5, align 8 + %6 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @0, i32 0, i32 0)) + %7 = call %String* @__quantum__rt__result_to_string(%Result* %expected) + %8 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %7) + call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %0, %Array* %3, %Result* %expected, %String* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + ret void +} + +declare %Result* @__quantum__rt__result_get_zero() + +define internal void @Microsoft__Quantum__Diagnostics__AssertAllZero__adj(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertAllZero__body(%Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertAllZero__ctl(%Array* %ctrls, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertAllZero__body(%Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertAllZero__ctladj(%Array* %__controlQubits__, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertAllZero__ctl(%Array* %__controlQubits__, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %bases, %Array* %qubits, %Result* %result, %String* %msg) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double 1.000000e+00, %String* %msg, double 1.000000e-10) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +declare void @__quantum__qis__assertmeasurementprobability__body(%Array*, %Array*, %Result*, double, %String*, double) + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__adj(%Array* %bases, %Array* %qubits, %Result* %result, %String* %msg) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %bases, %Array* %qubits, %Result* %result, %String* %msg) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctl(%Array* %controllingQubits, { %Array*, %Array*, %Result*, %String* }* %0) { +entry: + call 
void @__quantum__rt__array_update_alias_count(%Array* %controllingQubits, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 3 + %msg = load %String*, %String** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controllingQubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctladj(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 3 + %msg = load %String*, %String** %4, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, %String* }* getelementptr ({ %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Array*, %Array*, %Result*, %String* }* + %7 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 2 + %10 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 3 + store %Array* %bases, %Array** %7, align 8 + store %Array* %qubits, %Array** %8, 
align 8 + store %Result* %result, %Result** %9, align 8 + store %String* %msg, %String** %10, align 8 + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %6) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__body(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__adj(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, 
%String*, double }* %0, i32 0, i32 3 + %prob = load double, double* %4, align 8 + %5 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 4 + %msg = load %String*, %String** %5, align 8 + %6 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 5 + %tolerance = load double, double* %6, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, double, %String*, double }* getelementptr ({ %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array*, %Result*, double, %String*, double }* + %9 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 2 + %12 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 3 + %13 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 4 + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 5 + store %Array* %bases, %Array** %9, align 8 + store %Array* %qubits, %Array** %10, align 8 + store %Result* %result, %Result** %11, align 8 + store double %prob, double* %12, align 8 + store %String* %msg, %String** %13, align 8 + store double %tolerance, double* %14, align 8 + call void @__quantum__qis__assertmeasurementprobability__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %8) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +declare void @__quantum__qis__assertmeasurementprobability__ctl(%Array*, { %Array*, %Array*, %Result*, double, %String*, double }*) + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__ctladj(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, 
double, %String*, double }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 3 + %prob = load double, double* %4, align 8 + %5 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 4 + %msg = load %String*, %String** %5, align 8 + %6 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 5 + %tolerance = load double, double* %6, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, double, %String*, double }* getelementptr ({ %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array*, %Result*, double, %String*, double }* + %9 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 2 + %12 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 3 + %13 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 4 + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 5 + store %Array* %bases, %Array** %9, align 8 + store %Array* %qubits, %Array** %10, align 8 + store %Result* %result, %Result** %11, align 8 + store double %prob, double* %12, align 8 + store %String* %msg, %String** %13, align 8 + store double %tolerance, double* %14, align 8 + call void 
@__quantum__qis__assertmeasurementprobability__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %8) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +declare %String* @__quantum__rt__string_create(i8*) + +declare %String* @__quantum__rt__result_to_string(%Result*) + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) + +define internal void @Microsoft__Quantum__Diagnostics__AssertQubit__adj(%Result* %expected, %Qubit* %q) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to i2* + store i2 -2, i2* %2, align 1 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %q, %Qubit** %5, align 8 + %6 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @0, i32 0, i32 0)) + %7 = call %String* @__quantum__rt__result_to_string(%Result* %expected) + %8 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %7) + call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__adj(%Array* %0, %Array* %3, %Result* %expected, %String* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertQubit__ctl(%Array* %__controlQubits__, { %Result*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Result*, %Qubit* }, { %Result*, %Qubit* }* %0, i32 0, i32 0 + %expected = load %Result*, %Result** %1, align 8 + %2 = getelementptr inbounds { %Result*, %Qubit* }, { %Result*, %Qubit* }* %0, i32 0, i32 1 + %q = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to i2* + store i2 -2, i2* %5, align 1 + %6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 0) + %8 = bitcast i8* %7 to %Qubit** + store %Qubit* %q, %Qubit** %8, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %expected, i32 1) + %9 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @0, i32 0, i32 0)) + %10 = call %String* @__quantum__rt__result_to_string(%Result* %expected) + %11 = 
call %String* @__quantum__rt__string_concatenate(%String* %9, %String* %10) + call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, %String* }* getelementptr ({ %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Array*, %Result*, %String* }* + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 3 + store %Array* %3, %Array** %14, align 8 + store %Array* %6, %Array** %15, align 8 + store %Result* %expected, %Result** %16, align 8 + store %String* %11, %String** %17, align 8 + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %13) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %expected, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertQubit__ctladj(%Array* %__controlQubits__, { %Result*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Result*, %Qubit* }, { %Result*, %Qubit* }* %0, i32 0, i32 0 + %expected = load %Result*, %Result** %1, align 8 + %2 = getelementptr inbounds { %Result*, %Qubit* }, { %Result*, %Qubit* }* %0, i32 0, i32 1 + %q = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to i2* + store i2 -2, i2* %5, align 1 + %6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 0) + %8 = bitcast i8* %7 to %Qubit** + store %Qubit* %q, %Qubit** %8, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %expected, i32 1) + %9 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @0, i32 0, i32 0)) + %10 = call %String* @__quantum__rt__result_to_string(%Result* %expected) + %11 = call %String* @__quantum__rt__string_concatenate(%String* %9, %String* %10) + call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, %String* }* getelementptr ({ %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* null, 
i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Array*, %Result*, %String* }* + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 3 + store %Array* %3, %Array** %14, align 8 + store %Array* %6, %Array** %15, align 8 + store %Result* %expected, %Result** %16, align 8 + store %String* %11, %String** %17, align 8 + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctladj(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %13) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %expected, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %actual, i1 %expected, %String* %message) { +entry: + %0 = icmp ne i1 %actual, %expected + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Diagnostics___40aefd30d51541af9bcd54d40795c522___QsRef2__FormattedFailure____body(i1 %actual, i1 %expected, %String* %message) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics___40aefd30d51541af9bcd54d40795c522___QsRef2__FormattedFailure____body(i1 %actual, i1 %expected, %String* %message) { +entry: + %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @1, i32 0, i32 0)) + %1 = call %String* @__quantum__rt__string_concatenate(%String* %0, %String* %message) + %2 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @2, i32 0, i32 0)) + %4 = call %String* @__quantum__rt__string_concatenate(%String* %2, %String* %3) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + br i1 %expected, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %5 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @3, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %entry + %6 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @4, i32 0, i32 0)) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %7 = phi %String* [ %5, %condTrue__1 ], [ %6, %condFalse__1 ] + %8 = call %String* @__quantum__rt__string_concatenate(%String* %4, %String* %7) + 
call void @__quantum__rt__string_update_reference_count(%String* %4, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + %9 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @5, i32 0, i32 0)) + %10 = call %String* @__quantum__rt__string_concatenate(%String* %8, %String* %9) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1) + br i1 %actual, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condContinue__1 + %11 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @3, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condContinue__1 + %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @4, i32 0, i32 0)) + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condTrue__2 + %13 = phi %String* [ %11, %condTrue__2 ], [ %12, %condFalse__2 ] + %14 = call %String* @__quantum__rt__string_concatenate(%String* %10, %String* %13) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + call void @__quantum__rt__fail(%String* %14) + unreachable +} + +define internal void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %actual, %String* %message) { +entry: + %0 = xor i1 %actual, true + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__string_update_reference_count(%String* %message, i32 1) + call void @__quantum__rt__fail(%String* %message) + unreachable + +continue__1: ; preds = %entry + ret void +} + +declare void @__quantum__rt__fail(%String*) + +define internal double @Microsoft__Quantum__Characterization__EstimateFrequency__body(%Callable* %preparation, %Callable* %measurement, i64 %nQubits, i64 %nMeasurements) { +entry: + %nUp = alloca i64, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %measurement, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %measurement, i32 1) + store i64 0, i64* %nUp, align 4 + %0 = sub i64 %nMeasurements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxMeasurement = phi i64 [ 0, %entry ], [ %16, %exiting__1 ] + %1 = icmp sle i64 %idxMeasurement, %0 + br i1 %1, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %register = call %Array* @__quantum__rt__qubit_allocate_array(i64 %nQubits) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %3 = bitcast %Tuple* %2 to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + store %Array* %register, %Array** %4, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %preparation, %Tuple* %2, %Tuple* null) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Array* }* + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + store %Array* 
%register, %Array** %7, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Result* }* getelementptr ({ %Result* }, { %Result* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %measurement, %Tuple* %5, %Tuple* %8) + %9 = bitcast %Tuple* %8 to { %Result* }* + %10 = getelementptr inbounds { %Result* }, { %Result* }* %9, i32 0, i32 0 + %result = load %Result*, %Result** %10, align 8 + %11 = call %Result* @__quantum__rt__result_get_zero() + %12 = call i1 @__quantum__rt__result_equal(%Result* %result, %Result* %11) + br i1 %12, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %13 = load i64, i64* %nUp, align 4 + %14 = add i64 %13, 1 + store i64 %14, i64* %nUp, align 4 + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Reset__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @Microsoft__Quantum__Canon___c5c4aaa3c5eb47859afe1f27f59e164f_ApplyToEach__body(%Callable* %15, %Array* %register) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %register) + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %16 = add i64 %idxMeasurement, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %17 = load i64, i64* %nUp, align 4 + %18 = sitofp i64 %17 to double + %19 = sitofp i64 %nMeasurements to double + %20 = fdiv double %18, %19 + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %measurement, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %measurement, i32 -1) + ret double %20 +} + +declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) + +define internal void @Microsoft__Quantum__Canon___c5c4aaa3c5eb47859afe1f27f59e164f_ApplyToEach__body(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call %Range @Microsoft__Quantum__Arrays___cc41c4cd834643d1b4bd5cfbd88b1f31_IndexRange__body(%Array* %register) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + br label %preheader__1 + +preheader__1: ; 
preds = %entry + %4 = icmp sgt i64 %2, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxQubit = phi i64 [ %1, %preheader__1 ], [ %14, %exiting__1 ] + %5 = icmp sle i64 %idxQubit, %3 + %6 = icmp sge i64 %idxQubit, %3 + %7 = select i1 %4, i1 %5, i1 %6 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Qubit* }* + %13 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %12, i32 0, i32 0 + store %Qubit* %10, %Qubit** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %singleElementOperation, %Tuple* %11, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %14 = add i64 %idxQubit, %2 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Reset__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %2) + ret void +} + +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) + +declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) + +define internal double @Microsoft__Quantum__Characterization__EstimateFrequencyA__body(%Callable* %preparation, %Callable* %measurement, i64 %nQubits, i64 %nMeasurements) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %measurement, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %measurement, i32 1) + %0 = call double @Microsoft__Quantum__Characterization__EstimateFrequency__body(%Callable* %preparation, %Callable* %measurement, i64 %nQubits, i64 %nMeasurements) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %measurement, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %measurement, i32 -1) + ret double %0 +} + +define internal i1 @Microsoft__Quantum__Canon____QsRef2__AnyOutsideToleranceCP____body(double %tolerance, %Array* %coefficients) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, 
%exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %10) + %13 = bitcast i8* %12 to { double, double }** + %coefficient = load { double, double }*, { double, double }** %13, align 8 + %14 = bitcast { double, double }* %coefficient to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %coefficient) + %16 = fcmp ogt double %15, %tolerance + br i1 %16, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__2 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + %17 = sub i64 %0, 1 + br label %header__3 + +continue__1: ; preds = %body__2 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %continue__1 + %18 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %19 = sub i64 %0, 1 + br label %header__4 + +header__3: ; preds = %exiting__3, %then0__1 + %20 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__3 ] + %21 = icmp sle i64 %20, %17 + br i1 %21, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %20) + %23 = bitcast i8* %22 to { double, double }** + %24 = load { double, double }*, { double, double }** %23, align 8 + %25 = bitcast { double, double }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %26 = add i64 %20, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 true + +header__4: ; preds = %exiting__4, %exit__2 + %27 = phi i64 [ 0, %exit__2 ], [ %33, %exiting__4 ] + %28 = icmp sle i64 %27, %19 + br i1 %28, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %27) + %30 = bitcast i8* %29 to { double, double }** + %31 = load { double, double }*, { double, double }** %30, align 8 + %32 = bitcast { double, double }* %31 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %32, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %33 = add i64 %27, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 false +} + +define internal double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %input) { +entry: + %0 = 
bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 0 + %2 = load double, double* %1, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %2 +} + +define internal i1 @Microsoft__Quantum__Canon____QsRef2__AnyOutsideToleranceD____body(double %tolerance, %Array* %coefficients) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to double* + %coefficient = load double, double* %5, align 8 + %6 = call double @Microsoft__Quantum__Math__AbsD__body(double %coefficient) + %7 = fcmp oge double %6, %tolerance + br i1 %7, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 true + +continue__1: ; preds = %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 false +} + +define internal double @Microsoft__Quantum__Math__AbsD__body(double %a) { +entry: + %0 = fcmp olt double %a, 0.000000e+00 + br i1 %0, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %1 = fneg double %a + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %2 = phi double [ %1, %condTrue__1 ], [ %a, %condFalse__1 ] + ret double %2 +} + +define internal { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef2__MultiplexZCoefficients____body(%Array* %coefficients) { +entry: + %coefficients1 = alloca %Array*, align 8 + %coefficients0 = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %newCoefficientsLength = sdiv i64 %0, 2 + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %newCoefficientsLength) + %2 = sub i64 %newCoefficientsLength, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %3) + %6 = bitcast i8* %5 to double* + store double 0.000000e+00, double* %6, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %1, %Array** %coefficients0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %newCoefficientsLength) + %9 = sub i64 %newCoefficientsLength, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %14, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br 
i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 %10) + %13 = bitcast i8* %12 to double* + store double 0.000000e+00, double* %13, align 8 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %14 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + store %Array* %8, %Array** %coefficients1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %15 = sub i64 %newCoefficientsLength, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxCoeff = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %16 = icmp sle i64 %idxCoeff, %15 + br i1 %16, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %17 = load %Array*, %Array** %coefficients0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %18 = call %Array* @__quantum__rt__array_copy(%Array* %17, i1 false) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %idxCoeff) + %20 = bitcast i8* %19 to double* + %21 = load double, double* %20, align 8 + %22 = add i64 %idxCoeff, %newCoefficientsLength + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %22) + %24 = bitcast i8* %23 to double* + %25 = load double, double* %24, align 8 + %26 = fadd double %21, %25 + %27 = fmul double 5.000000e-01, %26 + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %idxCoeff) + %29 = bitcast i8* %28 to double* + store double %27, double* %29, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + store %Array* %18, %Array** %coefficients0, align 8 + %30 = load %Array*, %Array** %coefficients1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %30, i32 -1) + %31 = call %Array* @__quantum__rt__array_copy(%Array* %30, i1 false) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %idxCoeff) + %33 = bitcast i8* %32 to double* + %34 = load double, double* %33, align 8 + %35 = add i64 %idxCoeff, %newCoefficientsLength + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %35) + %37 = bitcast i8* %36 to double* + %38 = load double, double* %37, align 8 + %39 = fsub double %34, %38 + %40 = fmul double 5.000000e-01, %39 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %31, i64 %idxCoeff) + %42 = bitcast i8* %41 to double* + %43 = load double, double* %42, align 8 + store double %40, double* %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %31, i32 1) + store %Array* %31, %Array** %coefficients1, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %30, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %idxCoeff, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %45 = load %Array*, %Array** %coefficients0, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %45, i32 1) + %46 = load %Array*, %Array** %coefficients1, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %46, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { %Array*, %Array* }* + %49 = getelementptr inbounds { %Array*, %Array* }, { 
%Array*, %Array* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %48, i32 0, i32 1 + store %Array* %45, %Array** %49, align 8 + store %Array* %46, %Array** %50, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %45, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %46, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %45, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %46, i32 -1) + ret { %Array*, %Array* }* %48 +} + +declare %Array* @__quantum__rt__array_copy(%Array*, i1) + +define internal double @Microsoft__Quantum__Canon____QsRef2__TrotterStepSize____body(i64 %order) { +entry: + %0 = sitofp i64 %order to double + %1 = fsub double %0, 1.000000e+00 + %2 = fdiv double 1.000000e+00, %1 + %3 = call double @Microsoft__Quantum__Math__PowD__body(double 4.000000e+00, double %2) + %4 = fsub double 4.000000e+00, %3 + %5 = fdiv double 1.000000e+00, %4 + ret double %5 +} + +define internal double @Microsoft__Quantum__Math__PowD__body(double %x, double %y) { +entry: + %0 = call double @llvm.pow.f64(double %x, double %y) + ret double %0 +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__body(i2 %pauli, %Qubit* %target) { +entry: + %0 = icmp eq i2 %pauli, 1 + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +test1__1: ; preds = %entry + %1 = icmp eq i2 %pauli, -1 + br i1 %1, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__qis__y__body(%Qubit* %target) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %2 = icmp eq i2 %pauli, -2 + br i1 %2, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__qis__z__body(%Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__adj(i2 %pauli, %Qubit* %target) { +entry: + %0 = icmp eq i2 %pauli, 1 + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +test1__1: ; preds = %entry + %1 = icmp eq i2 %pauli, -1 + br i1 %1, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__qis__y__body(%Qubit* %target) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %2 = icmp eq i2 %pauli, -2 + br i1 %2, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__qis__z__body(%Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__ctl(%Array* %__controlQubits__, { i2, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = icmp eq i2 %pauli, 1 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void 
@__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %4 = icmp eq i2 %pauli, -1 + br i1 %4, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %5 = icmp eq i2 %pauli, -2 + br i1 %5, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__ctladj(%Array* %__controlQubits__, { i2, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = icmp eq i2 %pauli, 1 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %4 = icmp eq i2 %pauli, -1 + br i1 %4, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %5 = icmp eq i2 %pauli, -2 + br i1 %5, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 %pauli, i1 %bitApply, %Array* %bits, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %nBits = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %0 = call %Array* @Microsoft__Quantum__Arrays___fca98bb7b93b4b09b24f35a673f97332_Zipped__body(%Array* %bits, %Array* 
%qubits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %3) + %6 = bitcast i8* %5 to { i1, %Qubit* }** + %7 = load { i1, %Qubit* }*, { i1, %Qubit* }** %6, align 8 + %8 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %7, i32 0, i32 0 + %bit = load i1, i1* %8, align 1 + %9 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %7, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %9, align 8 + %10 = icmp eq i1 %bit, %bitApply + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + call void @Microsoft__Quantum__Canon__ApplyP__body(i2 %pauli, %Qubit* %qubit) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %11 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %12 = sub i64 %1, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %13) + %16 = bitcast i8* %15 to { i1, %Qubit* }** + %17 = load { i1, %Qubit* }*, { i1, %Qubit* }** %16, align 8 + %18 = bitcast { i1, %Qubit* }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___fca98bb7b93b4b09b24f35a673f97332_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %2 = icmp slt i64 %0, %1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = icmp eq i64 %nElements, 0 + br i1 %3, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %4 + +continue__1: ; preds = %condContinue__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %6 = bitcast i8* %5 to i1* + %7 = load i1, i1* %6, align 1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** 
%9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, %Qubit* }* getelementptr ({ i1, %Qubit* }, { i1, %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i1, %Qubit* }* + %13 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %12, i32 0, i32 1 + store i1 %7, i1* %13, align 1 + store %Qubit* %10, %Qubit** %14, align 8 + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %16 = sub i64 %nElements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %17 = phi i64 [ 0, %continue__1 ], [ %21, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %17) + %20 = bitcast i8* %19 to { i1, %Qubit* }** + store { i1, %Qubit* }* %12, { i1, %Qubit* }** %20, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %15, %Array** %output, align 8 + %22 = sub i64 %nElements, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %23) + %26 = bitcast i8* %25 to { i1, %Qubit* }** + %27 = load { i1, %Qubit* }*, { i1, %Qubit* }** %26, align 8 + %28 = bitcast { i1, %Qubit* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %30 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %48, %exiting__3 ] + %31 = icmp sle i64 %idxElement, %30 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %32 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %35 = bitcast i8* %34 to i1* + %36 = load i1, i1* %35, align 1 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %38 = bitcast i8* %37 to %Qubit** + %39 = load %Qubit*, %Qubit** %38, align 8 + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, %Qubit* }* getelementptr ({ i1, %Qubit* }, { i1, %Qubit* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i1, %Qubit* }* + %42 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %41, i32 0, i32 1 + store i1 %36, i1* %42, align 1 + store %Qubit* %39, %Qubit** %43, align 8 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxElement) + %45 = bitcast i8* %44 to { i1, %Qubit* }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %46 = load { i1, %Qubit* }*, { i1, %Qubit* }** %45, align 8 + %47 = bitcast { i1, %Qubit* }* 
%46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { i1, %Qubit* }* %41, { i1, %Qubit* }** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { i1, %Qubit* }** + %56 = load { i1, %Qubit* }*, { i1, %Qubit* }** %55, align 8 + %57 = bitcast { i1, %Qubit* }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret %Array* %49 +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 %pauli, i1 %bitApply, %Array* %bits, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %__qsVar0__nBits__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %0 = call %Array* @Microsoft__Quantum__Arrays___fca98bb7b93b4b09b24f35a673f97332_Zipped__body(%Array* %bits, %Array* %qubits) + %1 = call %Array* @Microsoft__Quantum__Arrays___fca98bb7b93b4b09b24f35a673f97332_Zipped__body(%Array* %bits, %Array* %qubits) + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + %4 = insertvalue %Range zeroinitializer, i64 %3, 0 + %5 = insertvalue %Range %4, i64 -1, 1 + %6 = insertvalue %Range %5, i64 0, 2 + %7 = call %Array* @__quantum__rt__array_slice_1d(%Array* %0, %Range %6, i1 true) + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %7) + %9 = sub i64 %8, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %10 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %7, i64 %10) + %13 = bitcast i8* %12 to { i1, %Qubit* }** + %14 = load { i1, %Qubit* }*, { i1, %Qubit* }** %13, align 8 + %15 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %14, i32 0, i32 0 + %__qsVar1__bit__ = load i1, i1* %15, align 1 + %16 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %14, i32 0, i32 1 + %__qsVar2__qubit__ = load %Qubit*, %Qubit** %16, align 8 + %17 = icmp eq i1 %__qsVar1__bit__, %bitApply + br i1 %17, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + call void 
@Microsoft__Quantum__Canon__ApplyP__adj(i2 %pauli, %Qubit* %__qsVar2__qubit__) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %18 = add i64 %10, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %19 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %20 = sub i64 %19, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %21) + %24 = bitcast i8* %23 to { i1, %Qubit* }** + %25 = load { i1, %Qubit* }*, { i1, %Qubit* }** %24, align 8 + %26 = bitcast { i1, %Qubit* }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + %28 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %29) + %32 = bitcast i8* %31 to { i1, %Qubit* }** + %33 = load { i1, %Qubit* }*, { i1, %Qubit* }** %32, align 8 + %34 = bitcast { i1, %Qubit* }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %7, i32 -1) + ret void +} + +declare %Array* @__quantum__rt__array_slice_1d(%Array*, %Range, i1) + +define internal void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__ctl(%Array* %__controlQubits__, { i2, i1, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 1 + %bitApply = load i1, i1* %2, align 1 + %3 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 2 + %bits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %4 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 3 + %qubits = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %nBits = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %5 = call %Array* @Microsoft__Quantum__Arrays___fca98bb7b93b4b09b24f35a673f97332_Zipped__body(%Array* %bits, %Array* %qubits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5) + %7 = sub i64 %6, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + 
%8 = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] + %9 = icmp sle i64 %8, %7 + br i1 %9, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %5, i64 %8) + %11 = bitcast i8* %10 to { i1, %Qubit* }** + %12 = load { i1, %Qubit* }*, { i1, %Qubit* }** %11, align 8 + %13 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %12, i32 0, i32 0 + %bit = load i1, i1* %13, align 1 + %14 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %12, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %14, align 8 + %15 = icmp eq i1 %bit, %bitApply + br i1 %15, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, %Qubit* }* getelementptr ({ i2, %Qubit* }, { i2, %Qubit* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { i2, %Qubit* }* + %18 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %17, i32 0, i32 1 + store i2 %pauli, i2* %18, align 1 + store %Qubit* %qubit, %Qubit** %19, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__ctl(%Array* %__controlQubits__, { i2, %Qubit* }* %17) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %20 = add i64 %8, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %21 = sub i64 %6, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %22 = phi i64 [ 0, %exit__1 ], [ %28, %exiting__2 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %5, i64 %22) + %25 = bitcast i8* %24 to { i1, %Qubit* }** + %26 = load { i1, %Qubit* }*, { i1, %Qubit* }** %25, align 8 + %27 = bitcast { i1, %Qubit* }* %26 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %28 = add i64 %22, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__ctladj(%Array* %__controlQubits__, { i2, i1, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 1 + %bitApply = load i1, i1* %2, align 1 + %3 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 2 + %bits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %4 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 3 + %qubits = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, 
i32 1) + %__qsVar0__nBits__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %5 = call %Array* @Microsoft__Quantum__Arrays___fca98bb7b93b4b09b24f35a673f97332_Zipped__body(%Array* %bits, %Array* %qubits) + %6 = call %Array* @Microsoft__Quantum__Arrays___fca98bb7b93b4b09b24f35a673f97332_Zipped__body(%Array* %bits, %Array* %qubits) + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + %9 = insertvalue %Range zeroinitializer, i64 %8, 0 + %10 = insertvalue %Range %9, i64 -1, 1 + %11 = insertvalue %Range %10, i64 0, 2 + %12 = call %Array* @__quantum__rt__array_slice_1d(%Array* %5, %Range %11, i1 true) + %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %12) + %14 = sub i64 %13, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %15 = phi i64 [ 0, %entry ], [ %27, %exiting__1 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %15) + %18 = bitcast i8* %17 to { i1, %Qubit* }** + %19 = load { i1, %Qubit* }*, { i1, %Qubit* }** %18, align 8 + %20 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %19, i32 0, i32 0 + %__qsVar1__bit__ = load i1, i1* %20, align 1 + %21 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %19, i32 0, i32 1 + %__qsVar2__qubit__ = load %Qubit*, %Qubit** %21, align 8 + %22 = icmp eq i1 %__qsVar1__bit__, %bitApply + br i1 %22, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, %Qubit* }* getelementptr ({ i2, %Qubit* }, { i2, %Qubit* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { i2, %Qubit* }* + %25 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %24, i32 0, i32 1 + store i2 %pauli, i2* %25, align 1 + store %Qubit* %__qsVar2__qubit__, %Qubit** %26, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__ctladj(%Array* %__controlQubits__, { i2, %Qubit* }* %24) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %27 = add i64 %15, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %28 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5) + %29 = sub i64 %28, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %30 = phi i64 [ 0, %exit__1 ], [ %36, %exiting__2 ] + %31 = icmp sle i64 %30, %29 + br i1 %31, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %5, i64 %30) + %33 = bitcast i8* %32 to { i1, %Qubit* }** + %34 = load { i1, %Qubit* }*, { i1, %Qubit* }** %33, align 8 + %35 = bitcast { i1, %Qubit* }* %34 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %36 = add i64 %30, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + %37 = sub i64 %7, 1 + br label %header__3 + +header__3: ; preds = 
%exiting__3, %exit__2 + %38 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %38) + %41 = bitcast i8* %40 to { i1, %Qubit* }** + %42 = load { i1, %Qubit* }*, { i1, %Qubit* }** %41, align 8 + %43 = bitcast { i1, %Qubit* }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %38, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__body(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %qubits__1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + %1 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = call i1 @Microsoft__Quantum__Arrays___6dd6a6a5a3954655b874dc7b0ce22a6c_IsEmpty__body(%Array* %qubits__1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @6, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__fail(%String* %3) + unreachable + +continue__1: ; preds = %entry + %4 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1) + %5 = trunc i64 %4 to i32 + %6 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %5) + %7 = fptosi double %6 to i64 + %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___88d35104feff4319834025969e88b807_Padded__body(i64 %7, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1) + %8 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef2__MultiplexZCoefficients____body(%Array* %coefficientsPadded) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %coefficients0 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + %coefficients1 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1) + %11 = call %Array* @Microsoft__Quantum__Arrays___649bb8be6ca848a4a0ca6b07e95f940f_Most__body(%Array* %qubits__1) + %12 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %11) + %13 = call %Qubit* @Microsoft__Quantum__Arrays___a59a999436ee4c879471cfe3c81d978e_Tail__body(%Array* %qubits__1) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients1, { %Array* }* %12, %Qubit* %13) + %14 = call i64 
@__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded) + %15 = icmp eq i64 %14, 2 + br i1 %15, label %then0__2, label %else__1 + +then0__2: ; preds = %continue__1 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0) + %17 = bitcast i8* %16 to double* + %18 = load double, double* %17, align 8 + %19 = call double @Microsoft__Quantum__Math__AbsD__body(double %18) + %20 = fcmp ogt double %19, %tolerance + br i1 %20, label %then0__3, label %continue__3 + +then0__3: ; preds = %then0__2 + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %22 = bitcast i8* %21 to i2* + store i2 0, i2* %22, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0) + %24 = bitcast i8* %23 to double* + %25 = load double, double* %24, align 8 + %theta = fmul double 1.000000e+00, %25 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %theta, %Array* %qubits__1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %then0__2 + br label %continue__2 + +else__1: ; preds = %continue__1 + %26 = call %Array* @Microsoft__Quantum__Arrays___649bb8be6ca848a4a0ca6b07e95f940f_Most__body(%Array* %qubits__1) + %27 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %26) + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__body(double %tolerance, %Array* %coefficients0, { %Array* }* %27) + %28 = getelementptr inbounds { %Array* }, { %Array* }* %27, i32 0, i32 0 + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %26, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1) + %30 = bitcast { %Array* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %continue__3 + %31 = getelementptr inbounds { %Array* }, { %Array* }* %12, i32 0, i32 0 + %32 = load %Array*, %Array** %31, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1) + %33 = bitcast { %Array*, %Array* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %32, 
i32 -1) + %34 = bitcast { %Array* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + ret void +} + +define internal i1 @Microsoft__Quantum__Arrays___6dd6a6a5a3954655b874dc7b0ce22a6c_IsEmpty__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = icmp eq i64 %0, 0 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret i1 %1 +} + +define internal %Array* @Microsoft__Quantum__Arrays___88d35104feff4319834025969e88b807_Padded__body(i64 %nElementsTotal, double %defaultElement, %Array* %inputArray) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 1) + %nElementsInitial = call i64 @__quantum__rt__array_get_size_1d(%Array* %inputArray) + %nAbsElementsTotal = call i64 @Microsoft__Quantum__Math__AbsI__body(i64 %nElementsTotal) + %0 = icmp sge i64 %nAbsElementsTotal, %nElementsInitial + %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([71 x i8], [71 x i8]* @18, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %0, i1 true, %String* %1) + %nElementsPad = sub i64 %nAbsElementsTotal, %nElementsInitial + %padArray = call %Array* @Microsoft__Quantum__Arrays___a50b761db41347a2a0cd7b53be7db59d_ConstantArray__body(i64 %nElementsPad, double %defaultElement) + call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 1) + %2 = icmp sge i64 %nElementsTotal, 0 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %3 = call %Array* @__quantum__rt__array_concatenate(%Array* %padArray, %Array* %inputArray) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + br label %condContinue__1 + +condFalse__1: ; preds = %entry + %4 = call %Array* @__quantum__rt__array_concatenate(%Array* %inputArray, %Array* %padArray) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %5 = phi %Array* [ %3, %condTrue__1 ], [ %4, %condFalse__1 ] + call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %padArray, i32 -1) + ret %Array* %5 +} + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare double @llvm.powi.f64.i32(double, i32) #0 + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients, { %Array* }* %control, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %4 = trunc i64 %3 to i32 + %5 = call double @llvm.powi.f64.i32(double 
-2.000000e+00, i32 %4) + %6 = fptosi double %5 to i64 + %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___88d35104feff4319834025969e88b807_Padded__body(i64 %6, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1) + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded) + %8 = icmp eq i64 %7, 1 + br i1 %8, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 0) + %10 = bitcast i8* %9 to double* + %11 = load double, double* %10, align 8 + %12 = call double @Microsoft__Quantum__Math__AbsD__body(double %11) + %13 = fcmp ogt double %12, %tolerance + br i1 %13, label %then0__2, label %continue__2 + +then0__2: ; preds = %then0__1 + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %15 = bitcast i8* %14 to i2* + store i2 -2, i2* %15, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 0) + %17 = bitcast i8* %16 to double* + %theta = load double, double* %17, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %19 = bitcast i8* %18 to %Qubit** + store %Qubit* %target, %Qubit** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + br label %continue__2 + +continue__2: ; preds = %then0__2, %then0__1 + br label %continue__1 + +else__1: ; preds = %entry + %20 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef2__MultiplexZCoefficients____body(%Array* %coefficientsPadded) + %21 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 0 + %coefficients0 = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1) + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 1 + %coefficients1 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1) + %23 = call %Array* @Microsoft__Quantum__Arrays___649bb8be6ca848a4a0ca6b07e95f940f_Most__body(%Array* %1) + %24 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %23) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients0, { %Array* }* %24, %Qubit* %target) + %25 = call i1 @Microsoft__Quantum__Canon____QsRef2__AnyOutsideToleranceD____body(double %tolerance, %Array* %coefficients1) + br i1 %25, label %then0__3, label %continue__3 + +then0__3: ; preds = %else__1 + %26 = call %Qubit* @Microsoft__Quantum__Arrays___a59a999436ee4c879471cfe3c81d978e_Tail__body(%Array* %1) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %26, %Qubit* %target) + %27 = call %Array* 
@Microsoft__Quantum__Arrays___649bb8be6ca848a4a0ca6b07e95f940f_Most__body(%Array* %1)
+  %28 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %27)
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients1, { %Array* }* %28, %Qubit* %target)
+  %29 = call %Qubit* @Microsoft__Quantum__Arrays___a59a999436ee4c879471cfe3c81d978e_Tail__body(%Array* %1)
+  call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %29, %Qubit* %target)
+  %30 = getelementptr inbounds { %Array* }, { %Array* }* %28, i32 0, i32 0
+  %31 = load %Array*, %Array** %30, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 -1)
+  %32 = bitcast { %Array* }* %28 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1)
+  br label %continue__3
+
+continue__3: ; preds = %then0__3, %else__1
+  %33 = getelementptr inbounds { %Array* }, { %Array* }* %24, i32 0, i32 0
+  %34 = load %Array*, %Array** %33, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1)
+  %35 = bitcast { %Array*, %Array* }* %20 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1)
+  %36 = bitcast { %Array* }* %24 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1)
+  br label %continue__1
+
+continue__1: ; preds = %continue__3, %continue__2
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1)
+  ret void
+}
+
+define internal { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %__Item1__) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__Item1__, i32 1)
+  %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64))
+  %1 = bitcast %Tuple* %0 to { %Array* }*
+  %2 = getelementptr inbounds { %Array* }, { %Array* }* %1, i32 0, i32 0
+  store %Array* %__Item1__, %Array** %2, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %__Item1__, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__Item1__, i32 -1)
+  ret { %Array* }* %1
+}
+
+define internal %Array* @Microsoft__Quantum__Arrays___649bb8be6ca848a4a0ca6b07e95f940f_Most__body(%Array* %array) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1)
+  %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array)
+  %1 = sub i64 %0, 2
+  %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2
+  %3 = call %Array* @__quantum__rt__array_slice_1d(%Array* %array, %Range %2, i1 true)
+  call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1)
+  ret %Array* %3
+}
+
+define internal %Qubit* @Microsoft__Quantum__Arrays___a59a999436ee4c879471cfe3c81d978e_Tail__body(%Array* %array) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1)
+  %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array)
+  %1 = icmp sgt i64 %0, 0
+  %2 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([39 x i8], [39 x i8]* @16, i32 0, i32 0))
+  call void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %1, i1 true, %String* %2)
+  %3 = sub i64 %0, 1
+  %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %3)
+  %5 = bitcast i8* %4 to %Qubit**
+  %6 = load %Qubit*, %Qubit** %5, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1)
+  ret %Qubit* %6
+}
+
+define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__adj(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+  %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0
+  %qubits__1 = load %Array*, %Array** %0, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1)
+  %1 = bitcast { %Array* }* %qubits to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1)
+  %2 = call i1 @Microsoft__Quantum__Arrays___6dd6a6a5a3954655b874dc7b0ce22a6c_IsEmpty__body(%Array* %qubits__1)
+  br i1 %2, label %then0__1, label %continue__1
+
+then0__1: ; preds = %entry
+  %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @6, i32 0, i32 0))
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1)
+  call void @__quantum__rt__fail(%String* %3)
+  unreachable
+
+continue__1: ; preds = %entry
+  %4 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1)
+  %5 = trunc i64 %4 to i32
+  %6 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %5)
+  %7 = fptosi double %6 to i64
+  %__qsVar0__coefficientsPadded__ = call %Array* @Microsoft__Quantum__Arrays___88d35104feff4319834025969e88b807_Padded__body(i64 %7, double 0.000000e+00, %Array* %coefficients)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1)
+  %8 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef2__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__)
+  %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0
+  %__qsVar1__coefficients0__ = load %Array*, %Array** %9, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1)
+  %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1
+  %__qsVar2__coefficients1__ = load %Array*, %Array** %10, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1)
+  %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsPadded__)
+  %12 = icmp eq i64 %11, 2
+  br i1 %12, label %then0__2, label %else__1
+
+then0__2: ; preds = %continue__1
+  %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0)
+  %14 = bitcast i8* %13 to double*
+  %15 = load double, double* %14, align 8
+  %16 = call double @Microsoft__Quantum__Math__AbsD__body(double %15)
+  %17 = fcmp ogt double %16, %tolerance
+  br i1 %17, label %then0__3, label %continue__3
+
+then0__3: ; preds = %then0__2
+  %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1)
+  %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0)
+  %19 = bitcast i8* %18 to i2*
+  store i2 0, i2* %19, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1)
+  %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0)
+  %21 = bitcast i8* %20 to double*
+  %22 = load double, double* %21, align 8
+  %theta = fmul double 1.000000e+00, %22
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1)
+  call void @__quantum__qis__exp__adj(%Array* %paulis, double %theta, %Array* %qubits__1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  br label %continue__3
+
+continue__3: ; preds = %then0__3, %then0__2
+  br label %continue__2
+
+else__1: ; preds = %continue__1
+  %23 = call %Array* @Microsoft__Quantum__Arrays___649bb8be6ca848a4a0ca6b07e95f940f_Most__body(%Array* %qubits__1)
+  %24 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %23)
+  call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__adj(double %tolerance, %Array* %__qsVar1__coefficients0__, { %Array* }* %24)
+  %25 = getelementptr inbounds { %Array* }, { %Array* }* %24, i32 0, i32 0
+  %26 = load %Array*, %Array** %25, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %26, i32 -1)
+  %27 = bitcast { %Array* }* %24 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1)
+  br label %continue__2
+
+continue__2: ; preds = %else__1, %continue__3
+  %28 = call %Array* @Microsoft__Quantum__Arrays___649bb8be6ca848a4a0ca6b07e95f940f_Most__body(%Array* %qubits__1)
+  %29 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %28)
+  %30 = call %Qubit* @Microsoft__Quantum__Arrays___a59a999436ee4c879471cfe3c81d978e_Tail__body(%Array* %qubits__1)
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar2__coefficients1__, { %Array* }* %29, %Qubit* %30)
+  %31 = getelementptr inbounds { %Array* }, { %Array* }* %29, i32 0, i32 0
+  %32 = load %Array*, %Array** %31, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1)
+  %33 = bitcast { %Array*, %Array* }* %8 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1)
+  %34 = bitcast { %Array* }* %29 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %coefficients, { %Array* }* %control, %Qubit* %target) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+  %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0
+  %1 = load %Array*, %Array** %0, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1)
+  %2 = bitcast { %Array* }* %control to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1)
+  %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1)
+  %4 = trunc i64 %3 to i32
+  %5 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %4)
+  %6 = fptosi double %5 to i64
+  %__qsVar0__coefficientsPadded__ = call %Array* @Microsoft__Quantum__Arrays___88d35104feff4319834025969e88b807_Padded__body(i64 %6, double 0.000000e+00, %Array* %coefficients)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1)
+  %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsPadded__)
+  %8 = icmp eq i64 %7, 1
+  br i1 %8, label %then0__1, label %else__1
+
+then0__1: ; preds = %entry
+  %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsPadded__, i64 0)
+  %10 = bitcast i8* %9 to double*
+  %11 = load double, double* %10, align 8
+  %12 = call double @Microsoft__Quantum__Math__AbsD__body(double %11)
+  %13 = fcmp ogt double %12, %tolerance
+  br i1 %13, label %then0__2, label %continue__2
+
+then0__2: ; preds = %then0__1
+  %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1)
+  %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0)
+  %15 = bitcast i8* %14 to i2*
+  store i2 -2, i2* %15, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1)
+  %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsPadded__, i64 0)
+  %17 = bitcast i8* %16 to double*
+  %theta = load double, double* %17, align 8
+  %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0)
+  %19 = bitcast i8* %18 to %Qubit**
+  store %Qubit* %target, %Qubit** %19, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  call void @__quantum__qis__exp__adj(%Array* %paulis, double %theta, %Array* %qubits)
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1)
+  br label %continue__2
+
+continue__2: ; preds = %then0__2, %then0__1
+  br label %continue__1
+
+else__1: ; preds = %entry
+  %20 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef2__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__)
+  %21 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 0
+  %__qsVar1__coefficients0__ = load %Array*, %Array** %21, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1)
+  %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 1
+  %__qsVar2__coefficients1__ = load %Array*, %Array** %22, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1)
+  %23 = call i1 @Microsoft__Quantum__Canon____QsRef2__AnyOutsideToleranceD____body(double %tolerance, %Array* %__qsVar2__coefficients1__)
+  br i1 %23, label %then0__3, label %continue__3
+
+then0__3: ; preds = %else__1
+  %24 = call %Qubit* @Microsoft__Quantum__Arrays___a59a999436ee4c879471cfe3c81d978e_Tail__body(%Array* %1)
+  call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %24, %Qubit* %target)
+  %25 = call %Array* @Microsoft__Quantum__Arrays___649bb8be6ca848a4a0ca6b07e95f940f_Most__body(%Array* %1)
+  %26 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %25)
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar2__coefficients1__, { %Array* }* %26, %Qubit* %target)
+  %27 = call %Qubit* @Microsoft__Quantum__Arrays___a59a999436ee4c879471cfe3c81d978e_Tail__body(%Array* %1)
+  call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %27, %Qubit* %target)
+  %28 = getelementptr inbounds { %Array* }, { %Array* }* %26, i32 0, i32 0
+  %29 = load %Array*, %Array** %28, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1)
+  %30 = bitcast { %Array* }* %26 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1)
+  br label %continue__3
+
+continue__3: ; preds = %then0__3, %else__1
+  %31 = call %Array* @Microsoft__Quantum__Arrays___649bb8be6ca848a4a0ca6b07e95f940f_Most__body(%Array* %1)
+  %32 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %31)
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar1__coefficients0__, { %Array* }* %32, %Qubit* %target)
+  %33 = getelementptr inbounds { %Array* }, { %Array* }* %32, i32 0, i32 0
+  %34 = load %Array*, %Array** %33, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1)
+  %35 = bitcast { %Array*, %Array* }* %20 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1)
+  %36 = bitcast { %Array* }* %32 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1)
+  br label %continue__1
+
+continue__1: ; preds = %continue__3, %continue__2
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0
+  %tolerance = load double, double* %1, align 8
+  %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1
+  %coefficients = load %Array*, %Array** %2, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+  %3 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2
+  %qubits = load { %Array* }*, { %Array* }** %3, align 8
+  %4 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0
+  %qubits__1 = load %Array*, %Array** %4, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1)
+  %5 = bitcast { %Array* }* %qubits to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1)
+  %6 = call i1 @Microsoft__Quantum__Arrays___6dd6a6a5a3954655b874dc7b0ce22a6c_IsEmpty__body(%Array* %qubits__1)
+  br i1 %6, label %then0__1, label %continue__1
+
+then0__1: ; preds = %entry
+  %7 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @6, i32 0, i32 0))
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1)
+  call void @__quantum__rt__fail(%String* %7)
+  unreachable
+
+continue__1: ; preds = %entry
+  %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1)
+  %9 = trunc i64 %8 to i32
+  %10 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %9)
+  %11 = fptosi double %10 to i64
+  %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___88d35104feff4319834025969e88b807_Padded__body(i64 %11, double 0.000000e+00, %Array* %coefficients)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1)
+  %12 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef2__MultiplexZCoefficients____body(%Array* %coefficientsPadded)
+  %13 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 0
+  %coefficients0 = load %Array*, %Array** %13, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1)
+  %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 1
+  %coefficients1 = load %Array*, %Array** %14, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 1)
+  %15 = call %Array* @Microsoft__Quantum__Arrays___649bb8be6ca848a4a0ca6b07e95f940f_Most__body(%Array* %qubits__1)
+  %16 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %15)
+  call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1)
+  %17 = call %Qubit* @Microsoft__Quantum__Arrays___a59a999436ee4c879471cfe3c81d978e_Tail__body(%Array* %qubits__1)
+  %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %19 = bitcast %Tuple* %18 to { double, %Array*, { %Array* }*, %Qubit* }*
+  %20 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 0
+  %21 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 1
+  %22 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 2
+  %23 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 3
+  store double %tolerance, double* %20, align 8
+  store %Array* %coefficients1, %Array** %21, align 8
+  store { %Array* }* %16, { %Array* }** %22, align 8
+  store %Qubit* %17, %Qubit** %23, align 8
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }*, %Qubit* }* %19)
+  %24 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded)
+  %25 = icmp eq i64 %24, 2
+  br i1 %25, label %then0__2, label %else__1
+
+then0__2: ; preds = %continue__1
+  %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0)
+  %27 = bitcast i8* %26 to double*
+  %28 = load double, double* %27, align 8
+  %29 = call double @Microsoft__Quantum__Math__AbsD__body(double %28)
+  %30 = fcmp ogt double %29, %tolerance
+  br i1 %30, label %then0__3, label %continue__3
+
+then0__3: ; preds = %then0__2
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1)
+  %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0)
+  %32 = bitcast i8* %31 to i2*
+  store i2 0, i2* %32, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1)
+  %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0)
+  %34 = bitcast i8* %33 to double*
+  %35 = load double, double* %34, align 8
+  %theta = fmul double 1.000000e+00, %35
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1)
+  %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64))
+  %37 = bitcast %Tuple* %36 to { %Array*, double, %Array* }*
+  %38 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %37, i32 0, i32 0
+  %39 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %37, i32 0, i32 1
+  %40 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %37, i32 0, i32 2
+  store %Array* %paulis, %Array** %38, align 8
+  store double %theta, double* %39, align 8
+  store %Array* %qubits__1, %Array** %40, align 8
+  call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %37)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1)
+  br label %continue__3
+
+continue__3: ; preds = %then0__3, %then0__2
+  br label %continue__2
+
+else__1: ; preds = %continue__1
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 1)
+  %41 = call %Array* @Microsoft__Quantum__Arrays___649bb8be6ca848a4a0ca6b07e95f940f_Most__body(%Array* %qubits__1)
+  %42 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %41)
+  call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1)
+  %43 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64))
+  %44 = bitcast %Tuple* %43 to { double, %Array*, { %Array* }* }*
+  %45 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %44, i32 0, i32 0
+  %46 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %44, i32 0, i32 1
+  %47 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %44, i32 0, i32 2
+  store double %tolerance, double* %45, align 8
+  store %Array* %coefficients0, %Array** %46, align 8
+  store { %Array* }* %42, { %Array* }** %47, align 8
+  call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %44)
+  %48 = getelementptr inbounds { %Array* }, { %Array* }* %42, i32 0, i32 0
+  %49 = load %Array*, %Array** %48, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1)
+  %50 = bitcast { %Array* }* %42 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %50, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 -1)
+  br label %continue__2
+
+continue__2: ; preds = %else__1, %continue__3
+  %51 = getelementptr inbounds { %Array* }, { %Array* }* %16, i32 0, i32 0
+  %52 = load %Array*, %Array** %51, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1)
+  %53 = bitcast { %Array*, %Array* }* %12 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %52, i32 -1)
+  %54 = bitcast { %Array* }* %16 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %54, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl(%Array* %controlRegister, { double, %Array*, { %Array* }*, %Qubit* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1)
+  %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0
+  %tolerance = load double, double* %1, align 8
+  %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1
+  %coefficients = load %Array*, %Array** %2, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+  %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2
+  %control = load { %Array* }*, { %Array* }** %3, align 8
+  %4 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0
+  %5 = load %Array*, %Array** %4, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1)
+  %6 = bitcast { %Array* }* %control to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1)
+  %7 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 3
+  %target = load %Qubit*, %Qubit** %7, align 8
+  %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5)
+  %9 = add i64 %8, 1
+  %10 = trunc i64 %9 to i32
+  %11 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %10)
+  %12 = fptosi double %11 to i64
+  %13 = trunc i64 %8 to i32
+  %14 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %13)
+  %15 = fptosi double %14 to i64
+  %16 = call %Array* @Microsoft__Quantum__Arrays___88d35104feff4319834025969e88b807_Padded__body(i64 %15, double 0.000000e+00, %Array* %coefficients)
+  %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___88d35104feff4319834025969e88b807_Padded__body(i64 %12, double 0.000000e+00, %Array* %16)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1)
+  %17 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef2__MultiplexZCoefficients____body(%Array* %coefficientsPadded)
+  %18 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 0
+  %coefficients0 = load %Array*, %Array** %18, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1)
+  %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 1
+  %coefficients1 = load %Array*, %Array** %19, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1)
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients0, { %Array* }* %control, %Qubit* %target)
+  %20 = call i1 @Microsoft__Quantum__Canon____QsRef2__AnyOutsideToleranceD____body(double %tolerance, %Array* %coefficients1)
+  br i1 %20, label %then0__1, label %continue__1
+
+then0__1: ; preds = %entry
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1)
+  call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target)
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1)
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients1, { %Array* }* %control, %Qubit* %target)
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1)
+  call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target)
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1)
+  br label %continue__1
+
+continue__1: ; preds = %then0__1, %entry
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1)
+  %21 = bitcast { %Array*, %Array* }* %17 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0
+  %tolerance = load double, double* %1, align 8
+  %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1
+  %coefficients = load %Array*, %Array** %2, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+  %3 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2
+  %qubits = load { %Array* }*, { %Array* }** %3, align 8
+  %4 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0
+  %qubits__1 = load %Array*, %Array** %4, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1)
+  %5 = bitcast { %Array* }* %qubits to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1)
+  %6 = call i1 @Microsoft__Quantum__Arrays___6dd6a6a5a3954655b874dc7b0ce22a6c_IsEmpty__body(%Array* %qubits__1)
+  br i1 %6, label %then0__1, label %continue__1
+
+then0__1: ; preds = %entry
+  %7 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @6, i32 0, i32 0))
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1)
+  call void @__quantum__rt__fail(%String* %7)
+  unreachable
+
+continue__1: ; preds = %entry
+  %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1)
+  %9 = trunc i64 %8 to i32
+  %10 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %9)
+  %11 = fptosi double %10 to i64
+  %__qsVar0__coefficientsPadded__ = call %Array* @Microsoft__Quantum__Arrays___88d35104feff4319834025969e88b807_Padded__body(i64 %11, double 0.000000e+00, %Array* %coefficients)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1)
+  %12 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef2__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__)
+  %13 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 0
+  %__qsVar1__coefficients0__ = load %Array*, %Array** %13, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1)
+  %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 1
+  %__qsVar2__coefficients1__ = load %Array*, %Array** %14, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1)
+  %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsPadded__)
+  %16 = icmp eq i64 %15, 2
+  br i1 %16, label %then0__2, label %else__1
+
+then0__2: ; preds = %continue__1
+  %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0)
+  %18 = bitcast i8* %17 to double*
+  %19 = load double, double* %18, align 8
+  %20 = call double @Microsoft__Quantum__Math__AbsD__body(double %19)
+  %21 = fcmp ogt double %20, %tolerance
+  br i1 %21, label %then0__3, label %continue__3
+
+then0__3: ; preds = %then0__2
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1)
+  %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0)
+  %23 = bitcast i8* %22 to i2*
+  store i2 0, i2* %23, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1)
+  %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0)
+  %25 = bitcast i8* %24 to double*
+  %26 = load double, double* %25, align 8
+  %theta = fmul double 1.000000e+00, %26
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1)
+  %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64))
+  %28 = bitcast %Tuple* %27 to { %Array*, double, %Array* }*
+  %29 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %28, i32 0, i32 0
+  %30 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %28, i32 0, i32 1
+  %31 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %28, i32 0, i32 2
+  store %Array* %paulis, %Array** %29, align 8
+  store double %theta, double* %30, align 8
+  store %Array* %qubits__1, %Array** %31, align 8
+  call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %28)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1)
+  br label %continue__3
+
+continue__3: ; preds = %then0__3, %then0__2
+  br label %continue__2
+
+else__1: ; preds = %continue__1
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 1)
+  %32 = call %Array* @Microsoft__Quantum__Arrays___649bb8be6ca848a4a0ca6b07e95f940f_Most__body(%Array* %qubits__1)
+  %33 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %32)
+  call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1)
+  %34 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64))
+  %35 = bitcast %Tuple* %34 to { double, %Array*, { %Array* }* }*
+  %36 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %35, i32 0, i32 0
+  %37 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %35, i32 0, i32 1
+  %38 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %35, i32 0, i32 2
+  store double %tolerance, double* %36, align 8
+  store %Array* %__qsVar1__coefficients0__, %Array** %37, align 8
+  store { %Array* }* %33, { %Array* }** %38, align 8
+  call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %35)
+  %39 = getelementptr inbounds { %Array* }, { %Array* }* %33, i32 0, i32 0
+  %40 = load %Array*, %Array** %39, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %40, i32 -1)
+  %41 = bitcast { %Array* }* %33 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1)
+  br label %continue__2
+
+continue__2: ; preds = %else__1, %continue__3
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 1)
+  %42 = call %Array* @Microsoft__Quantum__Arrays___649bb8be6ca848a4a0ca6b07e95f940f_Most__body(%Array* %qubits__1)
+  %43 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %42)
+  call void @__quantum__rt__array_update_reference_count(%Array* %42, i32 -1)
+  %44 = call %Qubit* @Microsoft__Quantum__Arrays___a59a999436ee4c879471cfe3c81d978e_Tail__body(%Array* %qubits__1)
+  %45 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %46 = bitcast %Tuple* %45 to { double, %Array*, { %Array* }*, %Qubit* }*
+  %47 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 0
+  %48 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 1
+  %49 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 2
+  %50 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 3
+  store double %tolerance, double* %47, align 8
+  store %Array* %__qsVar2__coefficients1__, %Array** %48, align 8
+  store { %Array* }* %43, { %Array* }** %49, align 8
+  store %Qubit* %44, %Qubit** %50, align 8
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }*, %Qubit* }* %46)
+  %51 = getelementptr inbounds { %Array* }, { %Array* }* %43, i32 0, i32 0
+  %52 = load %Array*, %Array** %51, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1)
+  %53 = bitcast { %Array*, %Array* }* %12 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %52, i32 -1)
+  %54 = bitcast { %Array* }* %43 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %54, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj(%Array* %controlRegister, { double, %Array*, { %Array* }*, %Qubit* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1)
+  %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0
+  %tolerance = load double, double* %1, align 8
+  %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1
+  %coefficients = load %Array*, %Array** %2, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+  %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2
+  %control = load { %Array* }*, { %Array* }** %3, align 8
+  %4 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0
+  %5 = load %Array*, %Array** %4, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1)
+  %6 = bitcast { %Array* }* %control to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1)
+  %7 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 3
+  %target = load %Qubit*, %Qubit** %7, align 8
+  %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5)
+  %9 = add i64 %8, 1
+  %10 = trunc i64 %9 to i32
+  %11 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %10)
+  %12 = fptosi double %11 to i64
+  %13 = trunc i64 %8 to i32
+  %14 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %13)
+  %15 = fptosi double %14 to i64
+  %16 = call %Array* @Microsoft__Quantum__Arrays___88d35104feff4319834025969e88b807_Padded__body(i64 %15, double 0.000000e+00, %Array* %coefficients)
+  %__qsVar0__coefficientsPadded__ = call %Array* @Microsoft__Quantum__Arrays___88d35104feff4319834025969e88b807_Padded__body(i64 %12, double 0.000000e+00, %Array* %16)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1)
+  %17 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef2__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__)
+  %18 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 0
+  %__qsVar1__coefficients0__ = load %Array*, %Array** %18, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1)
+  %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 1
+  %__qsVar2__coefficients1__ = load %Array*, %Array** %19, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1)
+  %20 = call i1 @Microsoft__Quantum__Canon____QsRef2__AnyOutsideToleranceD____body(double %tolerance, %Array* %__qsVar2__coefficients1__)
+  br i1 %20, label %then0__1, label %continue__1
+
+then0__1: ; preds = %entry
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1)
+  call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target)
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1)
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar2__coefficients1__, { %Array* }* %control, %Qubit* %target)
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1)
+  call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target)
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1)
+  br label %continue__1
+
+continue__1: ; preds = %then0__1, %entry
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar1__coefficients0__, { %Array* }* %control, %Qubit* %target)
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1)
+  %21 = bitcast { %Array*, %Array* }* %17 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body(double %tolerance, %Array* %coefficients, i2 %pauli, { %Array* }* %control, %Qubit* %target) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+  %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0
+  %1 = load %Array*, %Array** %0, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1)
+  %2 = bitcast { %Array* }* %control to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1)
+  %3 = icmp eq i2 %pauli, -2
+  br i1 %3, label %then0__1, label %test1__1
+
+then0__1: ; preds = %entry
+  %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1)
+  %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64))
+  %6 = bitcast %Tuple* %5 to { %Callable*, double, %Array*, { %Array* }* }*
+  %7 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 0
+  %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 1
+  %9 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 2
+  %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 3
+  store %Callable* %4, %Callable** %7, align 8
+  store double %tolerance, double* %8, align 8
+  store %Array* %coefficients, %Array** %9, align 8
+  store { %Array* }* %control, { %Array* }** %10, align 8
+  %op = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %5)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1)
+  %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64))
+  %12 = bitcast %Tuple* %11 to { %Qubit* }*
+  %13 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %12, i32 0, i32 0
+  store %Qubit* %target, %Qubit** %13, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %11, %Tuple* null)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1)
+  br label %continue__1
+
+test1__1: ; preds = %entry
+  %14 = icmp eq i2 %pauli, 1
+  br i1 %14, label %then1__1, label %test2__1
+
+then1__1: ; preds = %test1__1
+  %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1)
+  %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64))
+  %17 = bitcast %Tuple* %16 to { %Callable*, double, %Array*, i2, { %Array* }* }*
+  %18 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 0
+  %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 1
+  %20 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 2
+  %21 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 3
+  %22 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 4
+  store %Callable* %15, %Callable** %18, align 8
+  store double %tolerance, double* %19, align 8
+  store %Array* %coefficients, %Array** %20, align 8
+  store i2 -2, i2* %21, align 1
+  store { %Array* }* %control, { %Array* }** %22, align 8
+  %op__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %16)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 1)
+  %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  call void @Microsoft__Quantum__Canon___83e18a513c9c4da9be4769ede745259a_ApplyWithCA__body(%Callable* %23, %Callable* %op__1, %Qubit* %target)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1)
+  br label %continue__1
+
+test2__1: ; preds = %test1__1
+  %24 = icmp eq i2 %pauli, -1
+  br i1 %24, label %then2__1, label %test3__1
+
+then2__1: ; preds = %test2__1
+  %25 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1)
+  %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64))
+  %27 = bitcast %Tuple* %26 to { %Callable*, double, %Array*, i2, { %Array* }* }*
+  %28 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 0
+  %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 1
+  %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 2
+  %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 3
+  %32 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 4
+  store %Callable* %25, %Callable** %28, align 8
+  store double %tolerance, double* %29, align 8
+  store %Array* %coefficients, %Array** %30, align 8
+  store i2 1, i2* %31, align 1
+  store { %Array* }* %control, { %Array* }** %32, align 8
+  %op__2 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__3__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %26)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 1)
+  %33 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %33)
+  call void @Microsoft__Quantum__Canon___83e18a513c9c4da9be4769ede745259a_ApplyWithCA__body(%Callable* %33, %Callable* %op__2, %Qubit* %target)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1)
+  br label %continue__1
+
+test3__1: ; preds = %test2__1
+  %34 = icmp eq i2 %pauli, 0
+  br i1 %34, label %then3__1, label %else__1
+
+then3__1: ; preds = %test3__1
+  call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__body(double %tolerance, %Array* %coefficients, { %Array* }* %control)
+  br label %continue__1
+
+else__1: ; preds = %test3__1
+  %35 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @7, i32 0, i32 0))
+  %36 = icmp eq i2 1, %pauli
+  br i1 %36, label %condTrue__1, label %condFalse__1
+
+condTrue__1: ; preds = %else__1
+  %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @8, i32 0, i32 0))
+  br label %condContinue__1
+
+condFalse__1: ; preds = %else__1
+  %38 = icmp eq i2 -1, %pauli
+  br i1 %38, label %condTrue__2, label %condFalse__2
+
+condTrue__2: ; preds = %condFalse__1
+  %39 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @9, i32 0, i32 0))
+  br label %condContinue__2
+
+condFalse__2: ; preds = %condFalse__1
+  %40 = icmp eq i2 -2, %pauli
+  br i1 %40, label %condTrue__3, label %condFalse__3
+
+condTrue__3: ; preds = %condFalse__2
+  %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @10, i32 0, i32 0))
+  br label %condContinue__3
+
+condFalse__3: ; preds = %condFalse__2
+  %42 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @11, i32 0, i32 0))
+  br label %condContinue__3
+
+condContinue__3: ; preds = %condFalse__3, %condTrue__3
+  %43 = phi %String* [ %41, %condTrue__3 ], [ %42, %condFalse__3 ]
+  br label %condContinue__2
+
+condContinue__2: ; preds = %condContinue__3, %condTrue__2
+  %44 = phi %String* [ %39, %condTrue__2 ], [ %43, %condContinue__3 ]
+  br label %condContinue__1
+
+condContinue__1: ; preds = %condContinue__2, %condTrue__1
+  %45 = phi %String* [ %37, %condTrue__1 ], [ %44, %condContinue__2 ]
+  %46 = call %String* @__quantum__rt__string_concatenate(%String* %35, %String* %45)
+  call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1)
+  %47 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @12, i32 0, i32 0))
+  %48 = call %String* @__quantum__rt__string_concatenate(%String* %46, %String* %47)
+  call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1)
+  call void @__quantum__rt__fail(%String* %48)
+  unreachable
+
+continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }*
+  %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1
+  %2 = load double, double* %1, align 8
+  %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2
+  %4 = load %Array*, %Array** %3, align 8
+  %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3
+  %6 = load { %Array* }*, { %Array* }** %5, align 8
+  %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }*
+  %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0
+  %9 = load %Qubit*, %Qubit** %8, align 8
+  %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }*
+  %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0
+  %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1
+  %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2
+  %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3
+  store double %2, double* %12, align 8
+  store %Array* %4, %Array** %13, align 8
+  store { %Array* }* %6, { %Array* }** %14, align 8
+  store %Qubit* %9, %Qubit** %15, align 8
+  %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0
+  %17 = load %Callable*, %Callable** %16, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__1__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }*
+  %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1
+  %2 = load double, double* %1, align 8
+  %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2
+  %4 = load %Array*, %Array** %3, align 8
+  %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3
+  %6 = load { %Array* }*, { %Array* }** %5, align 8
+  %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }*
+  %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0
+  %9 = load %Qubit*, %Qubit** %8, align 8
+  %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }*
+  %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0
+  %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1
+  %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2
+  %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3
+  store double %2, double* %12, align 8
+  store %Array* %4, %Array** %13, align 8
+  store { %Array* }* %6, { %Array* }** %14, align 8
+  store %Qubit* %9, %Qubit** %15, align 8
+  %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0
+  %17 = load %Callable*, %Callable** %16, align 8
+  %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %18)
+  call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }*
+  %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1
+  %3 = load %Array*, %Array** %1, align 8
+  %4 = load %Qubit*, %Qubit** %2, align 8
+  %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }*
+  %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1
+  %7 = load double, double* %6, align 8
+  %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2
+  %9 = load %Array*, %Array** %8, align 8
+  %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3
+  %11 = load { %Array* }*, { %Array* }** %10, align 8
+  %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }*
+  %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0
+  %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1
+  %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2
+  %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3
+  store double %7, double* %14, align 8
+  store %Array* %9, %Array** %15, align 8
+  store { %Array* }* %11, { %Array* }** %16, align 8
+  store %Qubit* %4, %Qubit** %17, align 8
+  %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64))
+  %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }*
+  %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* },
{ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ 
%Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %5 = load double, double* %1, align 8 + %6 = load %Array*, %Array** %2, align 8 + %7 = load { %Array* }*, { %Array* }** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %5, %Array* %6, { %Array* }* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %5 = load double, double* %1, 
align 8 + %6 = load %Array*, %Array** %2, align 8 + %7 = load { %Array* }*, { %Array* }** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %5, %Array* %6, { %Array* }* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, { %Array* }*, %Qubit* }*, { double, %Array*, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl(%Array* %3, { double, %Array*, { %Array* }*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, { %Array* }*, %Qubit* }*, { double, %Array*, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj(%Array* %3, { double, %Array*, { %Array* }*, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define 
internal void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds 
{ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void 
@__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + 
%24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, 
%Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load %Array*, %Array** %2, align 8 + %8 = load i2, i2* %3, align 1 + %9 = load { %Array* }*, { %Array* }** %4, align 8 + %10 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body(double %6, %Array* %7, i2 %8, { %Array* }* %9, %Qubit* %10) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { 
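+; Unpacks the { double, %Array*, i2, { %Array* }*, %Qubit* } argument tuple
+; (tolerance, coefficients, pauli, control, target) and forwards it to the
+; adjoint specialization Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj.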
+entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load %Array*, %Array** %2, align 8 + %8 = load i2, i2* %3, align 1 + %9 = load { %Array* }*, { %Array* }** %4, align 8 + %10 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj(double %6, %Array* %7, i2 %8, { %Array* }* %9, %Qubit* %10) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, i2, { %Array* }*, %Qubit* }*, { double, %Array*, i2, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl(%Array* %3, { double, %Array*, i2, { %Array* }*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, i2, { %Array* }*, %Qubit* }*, { double, %Array*, i2, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj(%Array* %3, { double, %Array*, i2, { %Array* }*, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___83e18a513c9c4da9be4769ede745259a_ApplyWithCA__body(%Callable* %outerOperation, %Callable* %innerOperation, %Qubit* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Qubit* }* + %2 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %1, i32 0, i32 0 + store %Qubit* %target, %Qubit** %2, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %outerOperation, %Tuple* %0, %Tuple* null) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { 
%Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Qubit* }* + %5 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %4, i32 0, i32 0 + store %Qubit* %target, %Qubit** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %innerOperation, %Tuple* %3, %Tuple* null) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Qubit* }* + %9 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %8, i32 0, i32 0 + store %Qubit* %target, %Qubit** %9, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void 
@Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Lifted__PartialApplication__3__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { 
%Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, 
%Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* 
}* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 
false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +declare void @__quantum__rt__callable_make_adjoint(%Callable*) + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj(double %tolerance, %Array* %coefficients, i2 %pauli, { %Array* }* %control, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = icmp eq i2 %pauli, -2 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void 
@__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, double, %Array*, { %Array* }* }* + %7 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 2 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 3 + store %Callable* %4, %Callable** %7, align 8 + store double %tolerance, double* %8, align 8 + store %Array* %coefficients, %Array** %9, align 8 + store { %Array* }* %control, { %Array* }** %10, align 8 + %__qsVar0__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__4__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %11) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Qubit* }* + %14 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %13, i32 0, i32 0 + store %Qubit* %target, %Qubit** %14, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %15 = icmp eq i2 %pauli, 1 + br i1 %15, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %16 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 1 + %21 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 2 + %22 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 3 + %23 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 4 + store %Callable* %16, %Callable** %19, align 8 + store double %tolerance, double* %20, align 8 + store %Array* %coefficients, %Array** %21, align 8 + store i2 -2, i2* %22, align 1 + store { %Array* }* %control, { %Array* }** %23, align 8 + %__qsVar1__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__5__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %17) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + %24 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @Microsoft__Quantum__Canon___83e18a513c9c4da9be4769ede745259a_ApplyWithCA__adj(%Callable* %24, %Callable* %__qsVar1__op__, %Qubit* %target) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %25 = icmp eq i2 %pauli, -1 + br i1 %25, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %26 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 
1) to i64)) + %28 = bitcast %Tuple* %27 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 1 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 2 + %32 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 3 + %33 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 4 + store %Callable* %26, %Callable** %29, align 8 + store double %tolerance, double* %30, align 8 + store %Array* %coefficients, %Array** %31, align 8 + store i2 1, i2* %32, align 1 + store { %Array* }* %control, { %Array* }** %33, align 8 + %__qsVar2__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__6__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %27) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + %34 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %34) + call void @Microsoft__Quantum__Canon___83e18a513c9c4da9be4769ede745259a_ApplyWithCA__adj(%Callable* %34, %Callable* %__qsVar2__op__, %Qubit* %target) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %34, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %35 = icmp eq i2 %pauli, 0 + br i1 %35, label %then3__1, label %else__1 + +then3__1: ; preds = %test3__1 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__adj(double %tolerance, %Array* %coefficients, { %Array* }* %control) + br label %continue__1 + +else__1: ; preds = %test3__1 + %36 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @7, i32 0, i32 0)) + %37 = icmp eq i2 1, %pauli + br i1 %37, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %38 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @8, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %39 = icmp eq i2 -1, %pauli + br i1 %39, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %40 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @9, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %41 = 
icmp eq i2 -2, %pauli + br i1 %41, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %42 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @10, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %43 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @11, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %44 = phi %String* [ %42, %condTrue__3 ], [ %43, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %45 = phi %String* [ %40, %condTrue__2 ], [ %44, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %46 = phi %String* [ %38, %condTrue__1 ], [ %45, %condContinue__2 ] + %47 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %46) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + %48 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @12, i32 0, i32 0)) + %49 = call %String* @__quantum__rt__string_concatenate(%String* %47, %String* %48) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %48, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__fail(%String* %49) + unreachable + +continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %pauli = load i2, i2* %3, align 1 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %control = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = 
getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %target = load %Qubit*, %Qubit** %8, align 8 + %9 = icmp eq i2 %pauli, -2 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Callable*, double, %Array*, { %Array* }* }* + %13 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 2 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 3 + store %Callable* %10, %Callable** %13, align 8 + store double %tolerance, double* %14, align 8 + store %Array* %coefficients, %Array** %15, align 8 + store { %Array* }* %control, { %Array* }** %16, align 8 + %op = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__7__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %11) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %17 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %17) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, %Qubit* }* + %20 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %20, align 8 + store %Qubit* %target, %Qubit** %21, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %18, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %22 = icmp eq i2 %pauli, 1 + br i1 %22, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %26 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 1 + %28 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 2 + %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 3 + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 4 + store %Callable* %23, %Callable** %26, align 8 + store double %tolerance, double* %27, align 8 + store %Array* %coefficients, %Array** %28, align 8 + store i2 -2, i2* %29, align 1 + store { %Array* }* %control, { %Array* }** %30, align 8 + %op__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__8__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %24) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 1) + %31 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { %Callable*, %Callable*, %Qubit* }* + %34 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, 
%Callable*, %Qubit* }* %33, i32 0, i32 2 + store %Callable* %31, %Callable** %34, align 8 + store %Callable* %op__1, %Callable** %35, align 8 + store %Qubit* %target, %Qubit** %36, align 8 + call void @Microsoft__Quantum__Canon___83e18a513c9c4da9be4769ede745259a_ApplyWithCA__ctl(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %33) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %37 = icmp eq i2 %pauli, -1 + br i1 %37, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %38 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %41 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 0 + %42 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 1 + %43 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 2 + %44 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 3 + %45 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 4 + store %Callable* %38, %Callable** %41, align 8 + store double %tolerance, double* %42, align 8 + store %Array* %coefficients, %Array** %43, align 8 + store i2 1, i2* %44, align 1 + store { %Array* }* %control, { %Array* }** %45, align 8 + %op__2 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__9__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %39) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 1) + %46 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* 
@Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %46) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { %Callable*, %Callable*, %Qubit* }* + %49 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 1 + %51 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 2 + store %Callable* %46, %Callable** %49, align 8 + store %Callable* %op__2, %Callable** %50, align 8 + store %Qubit* %target, %Qubit** %51, align 8 + call void @Microsoft__Quantum__Canon___83e18a513c9c4da9be4769ede745259a_ApplyWithCA__ctl(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %48) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %52 = icmp eq i2 %pauli, 0 + br i1 %52, label %then3__1, label %else__1 + +then3__1: ; preds = %test3__1 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %53 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %54 = bitcast %Tuple* %53 to { double, %Array*, { %Array* }* }* + %55 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 0 + %56 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 1 + %57 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 2 + store double %tolerance, double* %55, align 8 + store %Array* %coefficients, %Array** %56, align 8 + store { %Array* }* %control, { %Array* }** %57, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %54) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + 
call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1) + br label %continue__1 + +else__1: ; preds = %test3__1 + %58 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @7, i32 0, i32 0)) + %59 = icmp eq i2 1, %pauli + br i1 %59, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %60 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @8, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %61 = icmp eq i2 -1, %pauli + br i1 %61, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %62 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @9, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %63 = icmp eq i2 -2, %pauli + br i1 %63, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %64 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @10, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %65 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @11, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %66 = phi %String* [ %64, %condTrue__3 ], [ %65, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %67 = phi %String* [ %62, %condTrue__2 ], [ %66, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %68 = phi %String* [ %60, %condTrue__1 ], [ %67, %condContinue__2 ] + %69 = call %String* @__quantum__rt__string_concatenate(%String* %58, %String* %68) + call void @__quantum__rt__string_update_reference_count(%String* %58, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %68, i32 -1) + %70 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @12, i32 0, i32 0)) + %71 = call %String* @__quantum__rt__string_concatenate(%String* %69, %String* %70) + call void @__quantum__rt__string_update_reference_count(%String* %69, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %70, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__fail(%String* %71) + unreachable + +continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, i2, 
{ %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %pauli = load i2, i2* %3, align 1 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %control = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %target = load %Qubit*, %Qubit** %8, align 8 + %9 = icmp eq i2 %pauli, -2 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Callable*, double, %Array*, { %Array* }* }* + %13 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 2 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 3 + store %Callable* %10, %Callable** %13, align 8 + store double %tolerance, double* %14, align 8 + store %Array* %coefficients, %Array** %15, align 8 + store { %Array* }* %control, { %Array* }** %16, align 8 + %__qsVar0__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__10__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %11) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %17 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 1) + call void 
@__quantum__rt__callable_make_adjoint(%Callable* %17) + call void @__quantum__rt__callable_make_controlled(%Callable* %17) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, %Qubit* }* + %20 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %20, align 8 + store %Qubit* %target, %Qubit** %21, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %18, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %22 = icmp eq i2 %pauli, 1 + br i1 %22, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %26 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 1 + %28 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 2 + %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 3 + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 4 + store %Callable* %23, %Callable** %26, align 8 + store double %tolerance, double* %27, align 8 + store %Array* %coefficients, %Array** %28, align 8 + store i2 -2, i2* %29, align 1 + store { %Array* }* %control, { %Array* }** %30, align 8 + %__qsVar1__op__ = call %Callable* @__quantum__rt__callable_create([4 x void 
(%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__11__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %24) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + %31 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { %Callable*, %Callable*, %Qubit* }* + %34 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 2 + store %Callable* %31, %Callable** %34, align 8 + store %Callable* %__qsVar1__op__, %Callable** %35, align 8 + store %Qubit* %target, %Qubit** %36, align 8 + call void @Microsoft__Quantum__Canon___83e18a513c9c4da9be4769ede745259a_ApplyWithCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %33) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %37 = icmp eq i2 %pauli, -1 + br i1 %37, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %38 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %41 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, 
double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 0 + %42 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 1 + %43 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 2 + %44 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 3 + %45 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 4 + store %Callable* %38, %Callable** %41, align 8 + store double %tolerance, double* %42, align 8 + store %Array* %coefficients, %Array** %43, align 8 + store i2 1, i2* %44, align 1 + store { %Array* }* %control, { %Array* }** %45, align 8 + %__qsVar2__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__12__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %39) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + %46 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %46) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { %Callable*, %Callable*, %Qubit* }* + %49 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 1 + %51 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 2 + store %Callable* %46, %Callable** %49, align 8 + store %Callable* %__qsVar2__op__, %Callable** %50, align 8 + store %Qubit* %target, %Qubit** %51, align 8 + call void @Microsoft__Quantum__Canon___83e18a513c9c4da9be4769ede745259a_ApplyWithCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %48) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + br label 
%continue__1 + +test3__1: ; preds = %test2__1 + %52 = icmp eq i2 %pauli, 0 + br i1 %52, label %then3__1, label %else__1 + +then3__1: ; preds = %test3__1 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %53 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %54 = bitcast %Tuple* %53 to { double, %Array*, { %Array* }* }* + %55 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 0 + %56 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 1 + %57 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 2 + store double %tolerance, double* %55, align 8 + store %Array* %coefficients, %Array** %56, align 8 + store { %Array* }* %control, { %Array* }** %57, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %54) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1) + br label %continue__1 + +else__1: ; preds = %test3__1 + %58 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @7, i32 0, i32 0)) + %59 = icmp eq i2 1, %pauli + br i1 %59, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %60 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @8, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %61 = icmp eq i2 -1, %pauli + br i1 %61, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %62 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @9, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %63 = icmp eq i2 -2, %pauli + br i1 %63, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %64 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @10, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %65 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @11, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %66 = phi %String* [ %64, %condTrue__3 ], [ %65, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %67 = phi %String* [ %62, %condTrue__2 ], [ %66, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %68 = phi %String* [ %60, %condTrue__1 ], [ %67, %condContinue__2 ] + %69 = call %String* @__quantum__rt__string_concatenate(%String* %58, %String* %68) + call void @__quantum__rt__string_update_reference_count(%String* %58, i32 -1) + call void 
@__quantum__rt__string_update_reference_count(%String* %68, i32 -1) + %70 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @12, i32 0, i32 0)) + %71 = call %String* @__quantum__rt__string_concatenate(%String* %69, %String* %70) + call void @__quantum__rt__string_update_reference_count(%String* %69, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %70, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__fail(%String* %71) + unreachable + +continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + ret void +} + +declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) + +declare void @__quantum__rt__callable_make_controlled(%Callable*) + +define internal void @Lifted__PartialApplication__4__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void 
@__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, 
%Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = 
getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { 
%Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, 
%Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* 
}* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 
false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___83e18a513c9c4da9be4769ede745259a_ApplyWithCA__adj(%Callable* %outerOperation, %Callable* %innerOperation, %Qubit* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %0 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %0) + call void @__quantum__rt__callable_make_adjoint(%Callable* %0) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Qubit* }* + %3 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %2, i32 0, i32 0 + store %Qubit* %target, %Qubit** %3, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %0, %Tuple* %1, %Tuple* null) + %4 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Qubit* }* + %7 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %6, i32 0, i32 0 + store %Qubit* %target, %Qubit** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %5, %Tuple* null) + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %8) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Qubit* }* + %11 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %10, i32 0, i32 0 + store %Qubit* %target, %Qubit** %11, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %9, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define 
internal void @Lifted__PartialApplication__6__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { 
%Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void 
@__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* 
}, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void 
@__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, 
%Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = 
getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { 
%Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, 
%Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* 
}* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 
false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___83e18a513c9c4da9be4769ede745259a_ApplyWithCA__ctl(%Array* %controlRegister, { %Callable*, %Callable*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %outerOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %innerOperation = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Qubit* }* + %6 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %5, i32 0, i32 0 + store %Qubit* %target, %Qubit** %6, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %outerOperation, %Tuple* %4, %Tuple* null) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Qubit* }* + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Qubit* %target, %Qubit** %11, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %8, %Tuple* null) + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Qubit* 
}* + %15 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %14, i32 0, i32 0 + store %Qubit* %target, %Qubit** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %13, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + 
store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, 
i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { 
%Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, 
%Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void 
@Lifted__PartialApplication__10__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* 
@__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void 
@Lifted__PartialApplication__11__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* 
}* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call 
void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* 
%22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___83e18a513c9c4da9be4769ede745259a_ApplyWithCA__ctladj(%Array* %controlRegister, { %Callable*, %Callable*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %outerOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %innerOperation = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Qubit* }* + %7 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %6, i32 0, i32 0 + store %Qubit* %target, %Qubit** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %5, %Tuple* null) + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %8) + call void @__quantum__rt__callable_make_adjoint(%Callable* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %9 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Array*, %Qubit* }* + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %10, i32 0, i32 1 + store %Array* %controlRegister, %Array** %11, align 8 + store %Qubit* %target, %Qubit** %12, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %9, %Tuple* null) + %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %13) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Qubit* }* + %16 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %15, i32 0, i32 0 + store %Qubit* %target, %Qubit** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %13, %Tuple* %14, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = 
getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = 
getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, 
%Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 
= call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal i1 @Microsoft__Quantum__Canon__IsRangeEmpty__body(%Range %rng) { +entry: + %0 = extractvalue %Range %rng, 0 + %1 = extractvalue %Range %rng, 1 + %2 = extractvalue %Range %rng, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %3 = icmp sgt i64 %1, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idx = phi i64 [ %0, %preheader__1 ], [ %7, %exiting__1 ] + %4 = icmp sle i64 %idx, %2 + %5 = icmp sge i64 %idx, %2 + %6 = 
select i1 %3, i1 %4, i1 %5 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + ret i1 false + +exiting__1: ; No predecessors! + %7 = add i64 %idx, %1 + br label %header__1 + +exit__1: ; preds = %header__1 + ret i1 true +} + +define internal %Callable* @Microsoft__Quantum__Canon__MultiplexerBruteForceFromGenerator__body(i64 %0, %Callable* %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %unitaryGenerator = bitcast %Tuple* %2 to { i64, %Callable* }* + %3 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %4 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + store i64 %0, i64* %3, align 4 + store %Callable* %1, %Callable** %4, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %5 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___f4659990d902465a85590edb80da0e54_MultiplexOperationsBruteForceFromGenerator__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { i64, %Callable* }* }* getelementptr ({ %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Callable*, { i64, %Callable* }* }* + %8 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %7, i32 0, i32 1 + store %Callable* %5, %Callable** %8, align 8 + store { i64, %Callable* }* %unitaryGenerator, { i64, %Callable* }** %9, align 8 + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__13__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__3__FunctionTable, %Tuple* %6) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret %Callable* %10 +} + +define internal void @Lifted__PartialApplication__13__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { { %Array* }*, %Array* }* + %4 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 0 + %5 = load { %Array* }*, { %Array* }** %4, align 8 + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 
0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %10 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 2 + store { i64, %Callable* }* %2, { i64, %Callable* }** %10, align 8 + store { %Array* }* %5, { %Array* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__13__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { { %Array* }*, %Array* }* + %4 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 0 + %5 = load { %Array* }*, { %Array* }** %4, align 8 + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %10 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 2 + store { i64, %Callable* }* %2, { i64, %Callable* }** %10, align 8 + store { %Array* }* %5, { %Array* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__13__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { %Array* }*, %Array* }*, { { %Array* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %6 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 1 + %7 = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8 + %8 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %9 = load { %Array* }*, { %Array* }** %8, align 8 + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %14 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 2 + store { i64, %Callable* }* %7, { i64, %Callable* }** %14, align 8 + store { %Array* }* %9, { %Array* }** %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, { { i64, %Callable* }*, { %Array* }*, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__13__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { %Array* }*, %Array* }*, { { %Array* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %6 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 1 + %7 = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8 + %8 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %9 = load { %Array* }*, { %Array* }** %8, align 8 + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %14 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 2 + store { i64, %Callable* }* %7, { i64, %Callable* }** %14, align 8 + store { %Array* }* %9, { %Array* }** %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, { { i64, %Callable* }*, { %Array* 
}*, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %23) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f4659990d902465a85590edb80da0e54_MultiplexOperationsBruteForceFromGenerator__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load { %Array* }*, { %Array* }** %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___f4659990d902465a85590edb80da0e54_MultiplexOperationsBruteForceFromGenerator__body({ i64, %Callable* }* %4, { %Array* }* %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f4659990d902465a85590edb80da0e54_MultiplexOperationsBruteForceFromGenerator__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load { %Array* }*, { %Array* }** %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___f4659990d902465a85590edb80da0e54_MultiplexOperationsBruteForceFromGenerator__adj({ i64, %Callable* }* %4, { %Array* }* %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f4659990d902465a85590edb80da0e54_MultiplexOperationsBruteForceFromGenerator__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, 
%Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, { %Array* }*, %Array* }*, { { i64, %Callable* }*, { %Array* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___f4659990d902465a85590edb80da0e54_MultiplexOperationsBruteForceFromGenerator__ctl(%Array* %3, { { i64, %Callable* }*, { %Array* }*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f4659990d902465a85590edb80da0e54_MultiplexOperationsBruteForceFromGenerator__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, { %Array* }*, %Array* }*, { { i64, %Callable* }*, { %Array* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___f4659990d902465a85590edb80da0e54_MultiplexOperationsBruteForceFromGenerator__ctladj(%Array* %3, { { i64, %Callable* }*, { %Array* }*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__3__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %4, i32 0, i32 1 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + %7 = bitcast { i64, %Callable* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__3__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Callable* }* 
}, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %4, i32 0, i32 1 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + %7 = bitcast { i64, %Callable* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f4659990d902465a85590edb80da0e54_MultiplexOperationsBruteForceFromGenerator__body({ i64, %Callable* }* %unitaryGenerator, { %Array* }* %index, %Array* %target) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %unitaryFunction = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %1 = bitcast { i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %4 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %nIndex = call i64 @__quantum__rt__array_get_size_1d(%Array* %3) + %5 = trunc i64 %nIndex to i32 + %6 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %5) + %nStates = fptosi double %6 to i64 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %nUnitaries = load i64, i64* %7, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %8 = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %nStates, i64 %nUnitaries) + %9 = sub i64 %8, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxOp = phi i64 [ 0, %entry ], [ %24, %exiting__1 ] + %10 = icmp sle i64 %idxOp, %9 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i64 }* + %13 = getelementptr inbounds { i64 }, { i64 }* %12, i32 0, i32 0 + store i64 %idxOp, i64* %13, align 4 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %unitaryFunction, %Tuple* %11, %Tuple* %14) + %15 = bitcast %Tuple* %14 to { %Callable* }* + %16 = getelementptr inbounds { %Callable* }, { %Callable* }* %15, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @Microsoft__Quantum__Canon___67201737def945c086e2e0eb97960033_ControlledOnInt__body(i64 %idxOp, %Callable* %17) + %19 = load %Array*, %Array** %2, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %19, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, %Array* }* + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 1 + store %Array* %19, %Array** %22, align 8 + store %Array* %target, %Array** %23, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %20, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %24 = add i64 %idxOp, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + %25 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f4659990d902465a85590edb80da0e54_MultiplexOperationsBruteForceFromGenerator__adj({ i64, %Callable* }* %unitaryGenerator, { %Array* }* %index, %Array* %target) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %__qsVar3__unitaryFunction__ = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + %1 = bitcast { i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %4 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %__qsVar0__nIndex__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %3) + %5 = trunc i64 
%__qsVar0__nIndex__ to i32 + %6 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %5) + %__qsVar1__nStates__ = fptosi double %6 to i64 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %__qsVar2__nUnitaries__ = load i64, i64* %7, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + %8 = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %__qsVar1__nStates__, i64 %__qsVar2__nUnitaries__) + %9 = sub i64 %8, 1 + %10 = sub i64 %9, 0 + %11 = sdiv i64 %10, 1 + %12 = mul i64 1, %11 + %13 = add i64 0, %12 + %14 = insertvalue %Range zeroinitializer, i64 %13, 0 + %15 = insertvalue %Range %14, i64 -1, 1 + %16 = insertvalue %Range %15, i64 0, 2 + %17 = extractvalue %Range %16, 0 + %18 = extractvalue %Range %16, 1 + %19 = extractvalue %Range %16, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %20 = icmp sgt i64 %18, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar4__idxOp__ = phi i64 [ %17, %preheader__1 ], [ %38, %exiting__1 ] + %21 = icmp sle i64 %__qsVar4__idxOp__, %19 + %22 = icmp sge i64 %__qsVar4__idxOp__, %19 + %23 = select i1 %20, i1 %21, i1 %22 + br i1 %23, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { i64 }* + %26 = getelementptr inbounds { i64 }, { i64 }* %25, i32 0, i32 0 + store i64 %__qsVar4__idxOp__, i64* %26, align 4 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %__qsVar3__unitaryFunction__, %Tuple* %24, %Tuple* %27) + %28 = bitcast %Tuple* %27 to { %Callable* }* + %29 = getelementptr inbounds { %Callable* }, { %Callable* }* %28, i32 0, i32 0 + %30 = load %Callable*, %Callable** %29, align 8 + %31 = call %Callable* @Microsoft__Quantum__Canon___67201737def945c086e2e0eb97960033_ControlledOnInt__body(i64 %__qsVar4__idxOp__, %Callable* %30) + %32 = call %Callable* @__quantum__rt__callable_copy(%Callable* %31, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %32) + %33 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %33, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %34 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %35 = bitcast %Tuple* %34 to { %Array*, %Array* }* + %36 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %35, i32 0, i32 0 + %37 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %35, i32 0, i32 1 + store %Array* %33, %Array** %36, align 8 + store %Array* %target, %Array** %37, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %32, %Tuple* %34, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %33, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %38 = add i64 %__qsVar4__idxOp__, %18 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + %39 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %39, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f4659990d902465a85590edb80da0e54_MultiplexOperationsBruteForceFromGenerator__ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %unitaryGenerator = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %unitaryFunction = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %3 = bitcast { i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %index = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %nIndex = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %9 = trunc i64 %nIndex to i32 + %10 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %9) + %nStates = 
fptosi double %10 to i64 + %11 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %nUnitaries = load i64, i64* %11, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %12 = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %nStates, i64 %nUnitaries) + %13 = sub i64 %12, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxOp = phi i64 [ 0, %entry ], [ %33, %exiting__1 ] + %14 = icmp sle i64 %idxOp, %13 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { i64 }* + %17 = getelementptr inbounds { i64 }, { i64 }* %16, i32 0, i32 0 + store i64 %idxOp, i64* %17, align 4 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %unitaryFunction, %Tuple* %15, %Tuple* %18) + %19 = bitcast %Tuple* %18 to { %Callable* }* + %20 = getelementptr inbounds { %Callable* }, { %Callable* }* %19, i32 0, i32 0 + %21 = load %Callable*, %Callable** %20, align 8 + %22 = call %Callable* @Microsoft__Quantum__Canon___67201737def945c086e2e0eb97960033_ControlledOnInt__body(i64 %idxOp, %Callable* %21) + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %24 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { %Array*, %Array* }* + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %26, i32 0, i32 0 + %28 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %26, i32 0, i32 1 + store %Array* %24, %Array** %27, align 8 + store %Array* %target, %Array** %28, align 8 + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %30 = bitcast %Tuple* %29 to { %Array*, { %Array*, %Array* }* }* + %31 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %30, i32 0, i32 0 + %32 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %30, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %31, align 8 + store { %Array*, %Array* }* %26, { %Array*, %Array* }** %32, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %29, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %33 = add i64 %idxOp, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + %34 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f4659990d902465a85590edb80da0e54_MultiplexOperationsBruteForceFromGenerator__ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %unitaryGenerator = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %__qsVar3__unitaryFunction__ = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + %3 = bitcast { i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %index = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %8, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %__qsVar0__nIndex__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %9 = trunc i64 %__qsVar0__nIndex__ to i32 + %10 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %9) + %__qsVar1__nStates__ = fptosi double %10 to i64 + %11 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %__qsVar2__nUnitaries__ = load i64, i64* %11, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + %12 = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %__qsVar1__nStates__, i64 %__qsVar2__nUnitaries__) + %13 = sub i64 %12, 1 + %14 = sub i64 %13, 0 + %15 = sdiv i64 %14, 1 + %16 = mul i64 1, %15 + %17 = add i64 0, %16 + %18 = insertvalue %Range zeroinitializer, i64 %17, 0 + %19 = insertvalue %Range %18, i64 -1, 1 + %20 = insertvalue %Range %19, i64 0, 2 + %21 = extractvalue %Range %20, 0 + %22 = extractvalue %Range %20, 1 + %23 = extractvalue %Range %20, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %24 = icmp sgt i64 %22, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar4__idxOp__ = phi i64 [ %21, %preheader__1 ], [ %46, %exiting__1 ] + %25 = icmp sle i64 %__qsVar4__idxOp__, %23 + %26 = icmp sge i64 %__qsVar4__idxOp__, %23 + %27 = select i1 %24, i1 %25, i1 %26 + br i1 %27, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { i64 }* + %30 = getelementptr inbounds { i64 }, { i64 }* %29, i32 0, i32 0 + store i64 %__qsVar4__idxOp__, i64* %30, align 4 + %31 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %__qsVar3__unitaryFunction__, %Tuple* %28, %Tuple* %31) + %32 = bitcast %Tuple* %31 to { %Callable* }* + %33 = getelementptr inbounds { %Callable* }, { %Callable* }* %32, i32 0, i32 0 + %34 = load %Callable*, %Callable** %33, align 8 + %35 = call %Callable* @Microsoft__Quantum__Canon___67201737def945c086e2e0eb97960033_ControlledOnInt__body(i64 %__qsVar4__idxOp__, %Callable* %34) + %36 = call %Callable* @__quantum__rt__callable_copy(%Callable* %35, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %36) + call void @__quantum__rt__callable_make_controlled(%Callable* %36) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %37 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %38 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %39 = bitcast %Tuple* %38 to { %Array*, %Array* }* + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 0 + %41 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 1 + store %Array* %37, %Array** %40, align 8 + store %Array* %target, %Array** %41, align 8 + %42 = call 
%Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %43 = bitcast %Tuple* %42 to { %Array*, { %Array*, %Array* }* }* + %44 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %43, i32 0, i32 0 + %45 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %43, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %44, align 8 + store { %Array*, %Array* }* %39, { %Array*, %Array* }** %45, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %36, %Tuple* %42, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %38, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %46 = add i64 %__qsVar4__idxOp__, %22 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + %47 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___ac50acd82a7e42128d811608cb927809_Compose__body(%Callable* %outer, %Callable* %inner) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outer, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outer, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inner, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inner, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___bd411897933a412dbc60a337f9d409f8___QsRef2__ComposedOutput____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %outer, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %outer, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %inner, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %inner, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Callable*, %Callable* }* + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store %Callable* %outer, %Callable** %4, align 8 + store %Callable* %inner, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__14__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__4__FunctionTable, %Tuple* %1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outer, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outer, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inner, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inner, i32 -1) + ret %Callable* %6 +} + +define internal void @Lifted__PartialApplication__14__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { double }* + %6 = getelementptr inbounds { double }, { double }* %5, i32 0, i32 0 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, double }* getelementptr ({ %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Callable*, double }* + %10 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %9, i32 0, i32 2 + store %Callable* %2, %Callable** %10, align 8 + store %Callable* %4, %Callable** %11, align 8 + store double %7, double* %12, align 8 + %13 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___bd411897933a412dbc60a337f9d409f8___QsRef2__ComposedOutput____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %1, align 8 + %5 = load %Callable*, %Callable** %2, align 8 + %6 = load double, double* %3, align 8 + %7 = call { double, double }* @Microsoft__Quantum__Canon___bd411897933a412dbc60a337f9d409f8___QsRef2__ComposedOutput____body(%Callable* %4, %Callable* %5, double %6) + %8 = bitcast %Tuple* %result-tuple to { { double, double }* }* + %9 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %8, i32 0, i32 0 + store { double, double }* %7, { double, double }** %9, align 8 + ret void +} + +define internal void @MemoryManagement__4__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__4__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + %5 = 
getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal { double, double }* @Microsoft__Quantum__Canon___bd411897933a412dbc60a337f9d409f8___QsRef2__ComposedOutput____body(%Callable* %outer, %Callable* %inner, double %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outer, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outer, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inner, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inner, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { double }* + %2 = getelementptr inbounds { double }, { double }* %1, i32 0, i32 0 + store double %target, double* %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %inner, %Tuple* %0, %Tuple* %3) + %4 = bitcast %Tuple* %3 to { double }* + %5 = getelementptr inbounds { double }, { double }* %4, i32 0, i32 0 + %6 = load double, double* %5, align 8 + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { double }* + %9 = getelementptr inbounds { double }, { double }* %8, i32 0, i32 0 + store double %6, double* %9, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }* }* getelementptr ({ { double, double }* }, { { double, double }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %outer, %Tuple* %7, %Tuple* %10) + %11 = bitcast %Tuple* %10 to { { double, double }* }* + %12 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %11, i32 0, i32 0 + %13 = load { double, double }*, { double, double }** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %outer, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outer, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inner, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inner, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret { double, double }* %13 +} + +define internal void @Microsoft__Quantum__Canon___fe3fa6d0df8b470d94ee1e5ef782dfc8___QsRef2__ApplyBoundCA____body(%Array* %operations, %Array* %target) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; 
preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %8 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %9 = phi i64 [ 0, %exit__1 ], [ %16, %exiting__2 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %9) + %12 = bitcast i8* %11 to %Callable** + %op = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Array* }* + %15 = getelementptr inbounds { %Array* }, { %Array* }* %14, i32 0, i32 0 + store %Array* %target, %Array** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %13, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %16 = add i64 %9, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %17 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %18 = phi i64 [ 0, %exit__2 ], [ %23, %exiting__3 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %18) + %21 = bitcast i8* %20 to %Callable** + %22 = load %Callable*, %Callable** %21, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %22, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %23 = add i64 %18, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___fe3fa6d0df8b470d94ee1e5ef782dfc8___QsRef2__ApplyBoundCA____adj(%Array* %operations, %Array* %target) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %8 = sub i64 %0, 1 + %9 = insertvalue %Range zeroinitializer, i64 %8, 0 + %10 = insertvalue %Range %9, i64 -1, 1 + %11 = insertvalue %Range %10, i64 0, 2 + %12 = call %Array* @__quantum__rt__array_slice_1d(%Array* %operations, %Range %11, i1 true) + %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %12) + %14 = sub i64 %13, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %23, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %15) + %18 = bitcast i8* %17 to %Callable** + %__qsVar0__op__ = load %Callable*, %Callable** %18, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %19 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %19) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array* }* + %22 = getelementptr inbounds { %Array* }, { %Array* }* %21, i32 0, i32 0 + store %Array* %target, %Array** %22, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %19, %Tuple* %20, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %23 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %24 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %25 = phi i64 [ 0, %exit__2 ], [ %30, %exiting__3 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %25) + %28 = bitcast i8* %27 to %Callable** + %29 = load %Callable*, %Callable** %28, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %29, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %30 = add i64 %25, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + ret void +} + +define internal 
void @Microsoft__Quantum__Canon___fe3fa6d0df8b470d94ee1e5ef782dfc8___QsRef2__ApplyBoundCA____ctl(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %operations = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %4) + %7 = bitcast i8* %6 to %Callable** + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %11 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %12) + %15 = bitcast i8* %14 to %Callable** + %op = load %Callable*, %Callable** %15, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %16 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %16, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %16) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, %Array* }* + %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %18, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %19, align 8 + store %Array* %target, %Array** %20, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %16, %Tuple* %17, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %16, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %22 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %23 = phi i64 [ 0, %exit__2 ], [ %28, %exiting__3 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %23) + %26 = bitcast i8* %25 to %Callable** + %27 = load %Callable*, %Callable** %26, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %27, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %28 = add i64 %23, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___fe3fa6d0df8b470d94ee1e5ef782dfc8___QsRef2__ApplyBoundCA____ctladj(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %operations = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %4) + %7 = bitcast i8* %6 to %Callable** + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %11 = sub i64 %2, 1 + %12 = insertvalue %Range zeroinitializer, i64 %11, 0 + %13 = insertvalue %Range %12, i64 -1, 1 + %14 = insertvalue %Range %13, i64 0, 2 + %15 = call %Array* @__quantum__rt__array_slice_1d(%Array* %operations, %Range %14, i1 true) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %15) + %17 = sub i64 %16, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %18 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %18) + %21 = bitcast i8* %20 to %Callable** + %__qsVar0__op__ = load %Callable*, %Callable** %21, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, %Array* }* + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %25, align 8 + store %Array* %target, %Array** %26, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %23, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %18, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %28 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %34, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %29) + %32 = bitcast i8* %31 to %Callable** + %33 = load %Callable*, %Callable** %32, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %33, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %34 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + ret void +} + +define internal %Range @Microsoft__Quantum__Arrays___cc41c4cd834643d1b4bd5cfbd88b1f31_IndexRange__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %2 +} + +define internal void @Microsoft__Quantum__Canon___9de2894b265e48a892179ea0586e5387_ApplyControlledOnBitString__body(%Array* %bits, 
%Callable* %oracle, %Array* %controlRegister, %Qubit* %targetRegister) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %2 = icmp sle i64 %0, %1 + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @13, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %3) + %4 = sub i64 %0, 1 + %5 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %4, 2 + %controlSubregister = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %5, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Qubit* }* + %9 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %8, i32 0, i32 1 + store %Array* %controlSubregister, %Array** %9, align 8 + store %Qubit* %targetRegister, %Qubit** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___9de2894b265e48a892179ea0586e5387_ApplyControlledOnBitString__adj(%Array* %bits, %Callable* %oracle, %Array* %controlRegister, %Qubit* %targetRegister) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) 
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %2 = icmp sle i64 %0, %1 + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @13, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %3) + %4 = sub i64 %0, 1 + %5 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %4, 2 + %__qsVar0__controlSubregister__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %5, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Qubit* }* + %9 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %8, i32 0, i32 1 + store %Array* %__qsVar0__controlSubregister__, %Array** %9, align 8 + store %Qubit* %targetRegister, %Qubit** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___9de2894b265e48a892179ea0586e5387_ApplyControlledOnBitString__ctl(%Array* %__controlQubits__, { %Array*, %Callable*, %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %bits = 
load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %targetRegister = load %Qubit*, %Qubit** %4, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %7 = icmp sle i64 %5, %6 + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @13, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %7, %String* %8) + %9 = sub i64 %5, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %controlSubregister = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %10, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Qubit* }* + %14 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 1 + store %Array* %controlSubregister, %Array** %14, align 8 + store %Qubit* %targetRegister, %Qubit** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { %Array*, %Qubit* }* }* + %18 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { %Array*, %Qubit* }* %13, { %Array*, %Qubit* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %16, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, 
%Array* %controlSubregister) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___9de2894b265e48a892179ea0586e5387_ApplyControlledOnBitString__ctladj(%Array* %__controlQubits__, { %Array*, %Callable*, %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %bits = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %targetRegister = load %Qubit*, %Qubit** %4, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %7 = icmp sle i64 %5, %6 + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @13, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %7, %String* %8) + %9 = sub i64 %5, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %__qsVar0__controlSubregister__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %10, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_adjoint(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Qubit* }* + %14 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 1 + store %Array* %__qsVar0__controlSubregister__, %Array** %14, align 8 + store %Qubit* %targetRegister, %Qubit** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { %Array*, %Qubit* }* }* + %18 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { %Array*, %Qubit* }* %13, { %Array*, %Qubit* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %16, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___fa6244d0f10748a4b57c7438a03da38e_ApplyControlledOnBitString__body(%Array* %bits, %Callable* %oracle, %Array* %controlRegister, %Array* %targetRegister) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %2 = icmp sle i64 %0, %1 + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @13, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %3) + %4 = sub i64 %0, 1 + %5 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %4, 2 + %controlSubregister = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %5, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %controlSubregister, %Array** %9, align 8 + store %Array* %targetRegister, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___fa6244d0f10748a4b57c7438a03da38e_ApplyControlledOnBitString__adj(%Array* %bits, %Callable* %oracle, %Array* %controlRegister, %Array* %targetRegister) { +entry: + call void 
@__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %2 = icmp sle i64 %0, %1 + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @13, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %3) + %4 = sub i64 %0, 1 + %5 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %4, 2 + %__qsVar0__controlSubregister__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %5, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %__qsVar0__controlSubregister__, %Array** %9, align 8 + store %Array* %targetRegister, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___fa6244d0f10748a4b57c7438a03da38e_ApplyControlledOnBitString__ctl(%Array* %__controlQubits__, { %Array*, %Callable*, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %bits = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %targetRegister = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %7 = icmp sle i64 %5, %6 + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @13, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %7, %String* %8) + %9 = sub i64 %5, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %controlSubregister = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %10, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Array* }* + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 1 + store %Array* %controlSubregister, %Array** %14, align 8 + store %Array* %targetRegister, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, 
{ %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { %Array*, %Array* }* }* + %18 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { %Array*, %Array* }* %13, { %Array*, %Array* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %16, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___fa6244d0f10748a4b57c7438a03da38e_ApplyControlledOnBitString__ctladj(%Array* %__controlQubits__, { %Array*, %Callable*, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %bits = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %targetRegister = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %5 = call 
i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %7 = icmp sle i64 %5, %6 + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @13, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %7, %String* %8) + %9 = sub i64 %5, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %__qsVar0__controlSubregister__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %10, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_adjoint(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Array* }* + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 1 + store %Array* %__qsVar0__controlSubregister__, %Array** %14, align 8 + store %Array* %targetRegister, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { %Array*, %Array* }* }* + %18 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { %Array*, %Array* }* %13, { %Array*, %Array* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %16, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 
-1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____body({ i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = sub i64 %nSteps, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %4 = icmp sle i64 %idx, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, double, %Array* }* + %7 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %6, i32 0, i32 2 + store i64 %idx, i64* %7, align 4 + store double %stepSize, double* %8, align 8 + store %Array* %target, %Array** %9, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %5, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____adj({ i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, 
%Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = sub i64 %nSteps, 1 + %4 = sub i64 %3, 0 + %5 = sdiv i64 %4, 1 + %6 = mul i64 1, %5 + %7 = add i64 0, %6 + %8 = insertvalue %Range zeroinitializer, i64 %7, 0 + %9 = insertvalue %Range %8, i64 -1, 1 + %10 = insertvalue %Range %9, i64 0, 2 + %11 = extractvalue %Range %10, 0 + %12 = extractvalue %Range %10, 1 + %13 = extractvalue %Range %10, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %14 = icmp sgt i64 %12, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idx__ = phi i64 [ %11, %preheader__1 ], [ %24, %exiting__1 ] + %15 = icmp sle i64 %__qsVar0__idx__, %13 + %16 = icmp sge i64 %__qsVar0__idx__, %13 + %17 = select i1 %14, i1 %15, i1 %16 + br i1 %17, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { i64, double, %Array* }* + %21 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %20, i32 0, i32 1 + %23 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %20, i32 0, i32 2 + store i64 %__qsVar0__idx__, i64* %21, align 4 + store double %stepSize, double* %22, align 8 + store %Array* %target, %Array** %23, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %19, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %24 = add i64 %__qsVar0__idx__, %12 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %3, align 8 + %4 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* 
}* %0, i32 0, i32 2 + %target = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 0 + %nSteps = load i64, i64* %5, align 4 + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 1 + %op = load %Callable*, %Callable** %6, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %7 = sub i64 %nSteps, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %19, %exiting__1 ] + %8 = icmp sle i64 %idx, %7 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %9) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, double, %Array* }* + %12 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %11, i32 0, i32 2 + store i64 %idx, i64* %12, align 4 + store double %stepSize, double* %13, align 8 + store %Array* %target, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { i64, double, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %16, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %17, align 8 + store { i64, double, %Array* }* %11, { i64, double, %Array* }** %18, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %15, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %3, align 8 + %4 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 0 + %nSteps = load i64, i64* %5, align 4 + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 1 + %op = load %Callable*, %Callable** %6, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %7 = sub i64 %nSteps, 1 + %8 = sub i64 %7, 0 + %9 = sdiv i64 %8, 1 + %10 = mul i64 1, %9 + %11 = add i64 0, %10 + %12 = insertvalue %Range zeroinitializer, i64 %11, 0 + %13 = insertvalue %Range %12, i64 -1, 1 + %14 = insertvalue %Range %13, i64 0, 2 + %15 = extractvalue %Range %14, 0 + %16 = extractvalue %Range %14, 1 + %17 = extractvalue %Range %14, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %18 = icmp sgt i64 %16, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idx__ = phi i64 [ %15, %preheader__1 ], [ %32, %exiting__1 ] + %19 = icmp sle i64 %__qsVar0__idx__, %17 + %20 = icmp sge i64 %__qsVar0__idx__, %17 + %21 = select i1 %18, i1 %19, i1 %20 + br i1 %21, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { i64, double, %Array* }* + %25 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %24, i32 0, i32 1 + %27 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %24, i32 0, i32 2 + store i64 %__qsVar0__idx__, i64* %25, align 4 + store double %stepSize, double* %26, align 8 + store %Array* %target, %Array** %27, align 8 + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { 
i64, double, %Array* }* }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { %Array*, { i64, double, %Array* }* }* + %30 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %29, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %30, align 8 + store { i64, double, %Array* }* %24, { i64, double, %Array* }** %31, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %28, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %32 = add i64 %__qsVar0__idx__, %16 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___1fe14bbd24584359ab40c526d5861af6_BoundCA__body(%Array* %operations) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___fe3fa6d0df8b470d94ee1e5ef782dfc8___QsRef2__ApplyBoundCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %9 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %15, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %10) + %13 = bitcast i8* %12 to %Callable** + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %15 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + 
call void @__quantum__rt__array_update_reference_count(%Array* %operations, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Callable*, %Array* }* + %18 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 1 + store %Callable* %8, %Callable** %18, align 8 + store %Array* %operations, %Array** %19, align 8 + %20 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__15__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__5__FunctionTable, %Tuple* %16) + %21 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %22 = phi i64 [ 0, %exit__2 ], [ %27, %exiting__3 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %22) + %25 = bitcast i8* %24 to %Callable** + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %27 = add i64 %22, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + ret %Callable* %20 +} + +define internal void @Lifted__PartialApplication__15__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__15__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__15__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__15__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___fe3fa6d0df8b470d94ee1e5ef782dfc8___QsRef2__ApplyBoundCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void 
@Microsoft__Quantum__Canon___fe3fa6d0df8b470d94ee1e5ef782dfc8___QsRef2__ApplyBoundCA____body(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___fe3fa6d0df8b470d94ee1e5ef782dfc8___QsRef2__ApplyBoundCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Canon___fe3fa6d0df8b470d94ee1e5ef782dfc8___QsRef2__ApplyBoundCA____adj(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___fe3fa6d0df8b470d94ee1e5ef782dfc8___QsRef2__ApplyBoundCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___fe3fa6d0df8b470d94ee1e5ef782dfc8___QsRef2__ApplyBoundCA____ctl(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___fe3fa6d0df8b470d94ee1e5ef782dfc8___QsRef2__ApplyBoundCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___fe3fa6d0df8b470d94ee1e5ef782dfc8___QsRef2__ApplyBoundCA____ctladj(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__5__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Callable** + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* 
%11, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__5__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Callable** + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %11, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___67201737def945c086e2e0eb97960033_ControlledOnInt__body(i64 %numberState, %Callable* %oracle) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___66cc0323981f4a52a7db37c456c9273e_ApplyControlledOnInt__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Callable* }* getelementptr ({ %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, i64, %Callable* }* + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store i64 %numberState, i64* %4, align 4 + store %Callable* %oracle, %Callable** %5, align 
8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__16__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__6__FunctionTable, %Tuple* %1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + ret %Callable* %6 +} + +define internal void @Lifted__PartialApplication__16__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 1 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Array* }* getelementptr ({ i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable*, %Array*, %Array* }* + %12 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 3 + store i64 %2, i64* %12, align 4 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__16__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 1 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Array* }* getelementptr ({ i64, 
%Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable*, %Array*, %Array* }* + %12 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 3 + store i64 %2, i64* %12, align 4 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__16__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Array* }* getelementptr ({ i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64, %Callable*, %Array*, %Array* }* + %16 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, 
%Callable*, %Array*, %Array* }* %15, i32 0, i32 3 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Callable*, %Array*, %Array* }* }* getelementptr ({ %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { i64, %Callable*, %Array*, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { i64, %Callable*, %Array*, %Array* }* %15, { i64, %Callable*, %Array*, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__16__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Array* }* getelementptr ({ i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64, %Callable*, %Array*, %Array* }* + %16 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { i64, %Callable*, %Array*, 
%Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 3 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Callable*, %Array*, %Array* }* }* getelementptr ({ %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { i64, %Callable*, %Array*, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { i64, %Callable*, %Array*, %Array* }* %15, { i64, %Callable*, %Array*, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___66cc0323981f4a52a7db37c456c9273e_ApplyControlledOnInt__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___66cc0323981f4a52a7db37c456c9273e_ApplyControlledOnInt__body(i64 %5, %Callable* %6, %Array* %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___66cc0323981f4a52a7db37c456c9273e_ApplyControlledOnInt__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable*, %Array*, 
%Array* }* + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___66cc0323981f4a52a7db37c456c9273e_ApplyControlledOnInt__adj(i64 %5, %Callable* %6, %Array* %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___66cc0323981f4a52a7db37c456c9273e_ApplyControlledOnInt__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, %Callable*, %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, %Callable*, %Array*, %Array* }*, { i64, %Callable*, %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___66cc0323981f4a52a7db37c456c9273e_ApplyControlledOnInt__ctl(%Array* %3, { i64, %Callable*, %Array*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___66cc0323981f4a52a7db37c456c9273e_ApplyControlledOnInt__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, %Callable*, %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, %Callable*, %Array*, %Array* }*, { i64, %Callable*, %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___66cc0323981f4a52a7db37c456c9273e_ApplyControlledOnInt__ctladj(%Array* %3, { i64, %Callable*, %Array*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__6__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__6__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___66cc0323981f4a52a7db37c456c9273e_ApplyControlledOnInt__body(i64 %numberState, %Callable* %oracle, %Array* %controlRegister, %Array* %targetRegister) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %bits = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %0) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %1 = call %Callable* @Microsoft__Quantum__Canon___9a30fce349964e6ba551089dfee89481_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %3 = bitcast %Tuple* %2 to { %Array*, %Array* }* + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + store %Array* %controlRegister, %Array** %4, align 8 + store %Array* %targetRegister, %Array** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %1, %Tuple* %2, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call 
void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___66cc0323981f4a52a7db37c456c9273e_ApplyControlledOnInt__adj(i64 %numberState, %Callable* %oracle, %Array* %controlRegister, %Array* %targetRegister) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %__qsVar0__bits__ = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %0) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 1) + %1 = call %Callable* @Microsoft__Quantum__Canon___9a30fce349964e6ba551089dfee89481_ControlledOnBitString__body(%Array* %__qsVar0__bits__, %Callable* %oracle) + %2 = call %Callable* @__quantum__rt__callable_copy(%Callable* %1, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %2) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, %Array* }* + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + store %Array* %controlRegister, %Array** %5, align 8 + store %Array* %targetRegister, %Array** %6, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %2, %Tuple* %3, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___66cc0323981f4a52a7db37c456c9273e_ApplyControlledOnInt__ctl(%Array* %__controlQubits__, { i64, %Callable*, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) 
+ %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %numberState = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %targetRegister = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %bits = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %5) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %6 = call %Callable* @Microsoft__Quantum__Canon___9a30fce349964e6ba551089dfee89481_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %6, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Array* %targetRegister, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___66cc0323981f4a52a7db37c456c9273e_ApplyControlledOnInt__ctladj(%Array* %__controlQubits__, { i64, %Callable*, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %numberState = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %targetRegister = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %__qsVar0__bits__ = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 1) + %6 = call %Callable* @Microsoft__Quantum__Canon___9a30fce349964e6ba551089dfee89481_ControlledOnBitString__body(%Array* %__qsVar0__bits__, %Callable* %oracle) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %6, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %7) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = 
getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Array* %targetRegister, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___78aac080fc954ed4b9e248419df94bda_ControlledOnInt__body(i64 %numberState, %Callable* %oracle) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___f562e828e774456b904e1952ae2c1ab1_ApplyControlledOnInt__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Callable* }* getelementptr ({ %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, i64, %Callable* }* + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { 
%Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store i64 %numberState, i64* %4, align 4 + store %Callable* %oracle, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__17__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__7__FunctionTable, %Tuple* %1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + ret %Callable* %6 +} + +define internal void @Lifted__PartialApplication__17__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 1 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Qubit* }* getelementptr ({ i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable*, %Array*, %Qubit* }* + %12 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 3 + store i64 %2, i64* %12, align 4 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__17__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %6 = getelementptr inbounds { %Array*, %Qubit* }, { 
%Array*, %Qubit* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 1 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Qubit* }* getelementptr ({ i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable*, %Array*, %Qubit* }* + %12 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 3 + store i64 %2, i64* %12, align 4 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__17__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Qubit* }*, { %Array*, %Qubit* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Qubit* }* getelementptr ({ i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64, %Callable*, %Array*, %Qubit* }* + %16 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, 
i32 0 + %17 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 3 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Qubit* %13, %Qubit** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* getelementptr ({ %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* + %22 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { i64, %Callable*, %Array*, %Qubit* }* %15, { i64, %Callable*, %Array*, %Qubit* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__17__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Qubit* }*, { %Array*, %Qubit* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Qubit* }* getelementptr ({ i64, %Callable*, 
%Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64, %Callable*, %Array*, %Qubit* }* + %16 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 3 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Qubit* %13, %Qubit** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* getelementptr ({ %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* + %22 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { i64, %Callable*, %Array*, %Qubit* }* %15, { i64, %Callable*, %Array*, %Qubit* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f562e828e774456b904e1952ae2c1ab1_ApplyControlledOnInt__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable*, %Array*, %Qubit* }* + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void 
@Microsoft__Quantum__Canon___f562e828e774456b904e1952ae2c1ab1_ApplyControlledOnInt__body(i64 %5, %Callable* %6, %Array* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f562e828e774456b904e1952ae2c1ab1_ApplyControlledOnInt__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable*, %Array*, %Qubit* }* + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon___f562e828e774456b904e1952ae2c1ab1_ApplyControlledOnInt__adj(i64 %5, %Callable* %6, %Array* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f562e828e774456b904e1952ae2c1ab1_ApplyControlledOnInt__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, %Callable*, %Array*, %Qubit* }*, { i64, %Callable*, %Array*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon___f562e828e774456b904e1952ae2c1ab1_ApplyControlledOnInt__ctl(%Array* %3, { i64, %Callable*, %Array*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f562e828e774456b904e1952ae2c1ab1_ApplyControlledOnInt__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, %Callable*, %Array*, %Qubit* }*, { i64, %Callable*, %Array*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon___f562e828e774456b904e1952ae2c1ab1_ApplyControlledOnInt__ctladj(%Array* %3, { i64, %Callable*, %Array*, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__7__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__7__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f562e828e774456b904e1952ae2c1ab1_ApplyControlledOnInt__body(i64 %numberState, %Callable* %oracle, %Array* %controlRegister, %Qubit* %targetRegister) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %bits = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %0) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %1 = call %Callable* @Microsoft__Quantum__Canon___8fb3402632744afcb503d18268c290e7_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %3 = bitcast %Tuple* %2 to { %Array*, %Qubit* }* + %4 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %3, i32 0, i32 0 + %5 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %3, i32 0, i32 1 + store %Array* %controlRegister, %Array** %4, align 8 + store %Qubit* %targetRegister, %Qubit** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %1, %Tuple* %2, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 -1) 
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f562e828e774456b904e1952ae2c1ab1_ApplyControlledOnInt__adj(i64 %numberState, %Callable* %oracle, %Array* %controlRegister, %Qubit* %targetRegister) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %__qsVar0__bits__ = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %0) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 1) + %1 = call %Callable* @Microsoft__Quantum__Canon___8fb3402632744afcb503d18268c290e7_ControlledOnBitString__body(%Array* %__qsVar0__bits__, %Callable* %oracle) + %2 = call %Callable* @__quantum__rt__callable_copy(%Callable* %1, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %2) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, %Qubit* }* + %5 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + store %Array* %controlRegister, %Array** %5, align 8 + store %Qubit* %targetRegister, %Qubit** %6, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %2, %Tuple* %3, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f562e828e774456b904e1952ae2c1ab1_ApplyControlledOnInt__ctl(%Array* %__controlQubits__, { i64, %Callable*, %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %numberState = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, 
%Array*, %Qubit* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %targetRegister = load %Qubit*, %Qubit** %4, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %bits = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %5) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %6 = call %Callable* @Microsoft__Quantum__Canon___8fb3402632744afcb503d18268c290e7_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %6, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Qubit* }* + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Qubit* %targetRegister, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %14, align 8 + store { %Array*, %Qubit* }* %9, { %Array*, %Qubit* }** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f562e828e774456b904e1952ae2c1ab1_ApplyControlledOnInt__ctladj(%Array* %__controlQubits__, { i64, %Callable*, %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %numberState = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %targetRegister = load %Qubit*, %Qubit** %4, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %__qsVar0__bits__ = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 1) + %6 = call %Callable* @Microsoft__Quantum__Canon___8fb3402632744afcb503d18268c290e7_ControlledOnBitString__body(%Array* %__qsVar0__bits__, %Callable* %oracle) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %6, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %7) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Qubit* }* + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Qubit* %targetRegister, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { 
%Array*, { %Array*, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %14, align 8 + store { %Array*, %Qubit* }* %9, { %Array*, %Qubit* }** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___5efc0405668d4c768edc4328b6eb53a1_NoOp__body(%Array* %input) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___5efc0405668d4c768edc4328b6eb53a1_NoOp__adj(%Array* %input) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___5efc0405668d4c768edc4328b6eb53a1_NoOp__ctl(%Array* %__controlQubits__, %Array* %input) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___5efc0405668d4c768edc4328b6eb53a1_NoOp__ctladj(%Array* %__controlQubits__, %Array* %input) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____body(i64 %order, { i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = 
load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = icmp sgt i64 %order, 2 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %stepSizeOuter = call double @Microsoft__Quantum__Canon____QsRef2__TrotterStepSize____body(i64 %order) + %4 = fmul double 4.000000e+00, %stepSizeOuter + %stepSizeInner = fsub double 1.000000e+00, %4 + %5 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { i64, %Callable* }* + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %7, i32 0, i32 1 + store i64 %nSteps, i64* %8, align 4 + store %Callable* %op, %Callable** %9, align 8 + %10 = fmul double %stepSizeOuter, %stepSize + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____body(i64 %5, { i64, %Callable* }* %7, double %10, %Array* %target) + %11 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, %Callable* }* + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1 + store i64 %nSteps, i64* %14, align 4 + store %Callable* %op, %Callable** %15, align 8 + %16 = fmul double %stepSizeOuter, %stepSize + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____body(i64 %11, { i64, %Callable* }* %13, double %16, %Array* %target) + %17 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { i64, %Callable* }* + %20 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %19, i32 0, i32 1 + store i64 %nSteps, i64* %20, align 4 + store %Callable* %op, %Callable** %21, align 8 + %22 = fmul double %stepSizeInner, %stepSize + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____body(i64 %17, { i64, %Callable* }* %19, double %22, %Array* %target) + %23 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + 
%25 = bitcast %Tuple* %24 to { i64, %Callable* }* + %26 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %25, i32 0, i32 1 + store i64 %nSteps, i64* %26, align 4 + store %Callable* %op, %Callable** %27, align 8 + %28 = fmul double %stepSizeOuter, %stepSize + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____body(i64 %23, { i64, %Callable* }* %25, double %28, %Array* %target) + %29 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %31 = bitcast %Tuple* %30 to { i64, %Callable* }* + %32 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %31, i32 0, i32 0 + %33 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %31, i32 0, i32 1 + store i64 %nSteps, i64* %32, align 4 + store %Callable* %op, %Callable** %33, align 8 + %34 = fmul double %stepSizeOuter, %stepSize + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____body(i64 %29, { i64, %Callable* }* %31, double %34, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %35 = icmp eq i64 %order, 2 + br i1 %35, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { i64, %Callable* }* + %38 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %37, i32 0, i32 1 + store i64 %nSteps, i64* %38, align 4 + store %Callable* %op, %Callable** %39, align 8 + call void 
@Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____body({ i64, %Callable* }* %37, double %stepSize, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %continue__1 + +else__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i64, %Callable* }* + %42 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %41, i32 0, i32 1 + store i64 %nSteps, i64* %42, align 4 + store %Callable* %op, %Callable** %43, align 8 + call void @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____body({ i64, %Callable* }* %41, double %stepSize, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____body({ i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = sub i64 %nSteps, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %4 = icmp sle i64 %idx, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { i64, double, %Array* }* + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %7, i32 0, i32 1 + %10 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %7, i32 0, i32 2 + store i64 %idx, i64* %8, align 4 + store double %5, double* %9, align 8 + store %Array* %target, %Array** 
%10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %6, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %12 = sub i64 %nSteps, 1 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idx__1 = phi i64 [ %12, %preheader__1 ], [ %22, %exiting__2 ] + %13 = icmp sle i64 %idx__1, 0 + %14 = icmp sge i64 %idx__1, 0 + %15 = select i1 false, i1 %13, i1 %14 + br i1 %15, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %16 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { i64, double, %Array* }* + %19 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %18, i32 0, i32 1 + %21 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %18, i32 0, i32 2 + store i64 %idx__1, i64* %19, align 4 + store double %16, double* %20, align 8 + store %Array* %target, %Array** %21, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %17, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %idx__1, -1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____adj(i64 %order, { i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = icmp sgt i64 %order, 2 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %__qsVar0__stepSizeOuter__ = call double @Microsoft__Quantum__Canon____QsRef2__TrotterStepSize____body(i64 %order) + %4 = fmul double 4.000000e+00, %__qsVar0__stepSizeOuter__ + %__qsVar1__stepSizeInner__ = fsub double 1.000000e+00, %4 + %5 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* 
getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { i64, %Callable* }* + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %7, i32 0, i32 1 + store i64 %nSteps, i64* %8, align 4 + store %Callable* %op, %Callable** %9, align 8 + %10 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____adj(i64 %5, { i64, %Callable* }* %7, double %10, %Array* %target) + %11 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, %Callable* }* + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1 + store i64 %nSteps, i64* %14, align 4 + store %Callable* %op, %Callable** %15, align 8 + %16 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____adj(i64 %11, { i64, %Callable* }* %13, double %16, %Array* %target) + %17 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { i64, %Callable* }* + %20 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %19, i32 0, i32 1 + store i64 %nSteps, i64* %20, align 4 + store %Callable* %op, %Callable** %21, align 8 + %22 = fmul double %__qsVar1__stepSizeInner__, %stepSize + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____adj(i64 %17, { i64, %Callable* }* %19, double %22, %Array* %target) + %23 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { i64, %Callable* }* + %26 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %25, i32 0, i32 1 + store i64 %nSteps, i64* %26, align 4 + store %Callable* %op, %Callable** %27, align 8 + %28 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____adj(i64 %23, { i64, %Callable* }* %25, double %28, %Array* %target) + %29 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 
1) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %31 = bitcast %Tuple* %30 to { i64, %Callable* }* + %32 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %31, i32 0, i32 0 + %33 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %31, i32 0, i32 1 + store i64 %nSteps, i64* %32, align 4 + store %Callable* %op, %Callable** %33, align 8 + %34 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____adj(i64 %29, { i64, %Callable* }* %31, double %34, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %35 = icmp eq i64 %order, 2 + br i1 %35, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { i64, %Callable* }* + %38 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %37, i32 0, i32 1 + store i64 %nSteps, i64* %38, align 4 + store %Callable* %op, %Callable** %39, align 8 + call void @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____adj({ i64, %Callable* }* %37, double %stepSize, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %continue__1 + +else__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to 
i64)) + %41 = bitcast %Tuple* %40 to { i64, %Callable* }* + %42 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %41, i32 0, i32 1 + store i64 %nSteps, i64* %42, align 4 + store %Callable* %op, %Callable** %43, align 8 + call void @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____adj({ i64, %Callable* }* %41, double %stepSize, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____adj({ i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = sub i64 %nSteps, 1 + %4 = sub i64 0, %3 + %5 = sdiv i64 %4, -1 + %6 = mul i64 -1, %5 + %7 = add i64 %3, %6 + %8 = insertvalue %Range zeroinitializer, i64 %7, 0 + %9 = insertvalue %Range %8, i64 1, 1 + %10 = insertvalue %Range %9, i64 %3, 2 + %11 = extractvalue %Range %10, 0 + %12 = extractvalue %Range %10, 1 + %13 = extractvalue %Range %10, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %14 = icmp sgt i64 %12, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar1__idx__ = phi i64 [ %11, %preheader__1 ], [ %25, %exiting__1 ] + %15 = icmp sle i64 %__qsVar1__idx__, %13 + %16 = icmp sge i64 %__qsVar1__idx__, %13 + %17 = select i1 %14, i1 %15, i1 %16 + br i1 %17, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + %19 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { i64, double, %Array* }* + %22 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %21, i32 0, i32 2 + store i64 %__qsVar1__idx__, i64* %22, align 4 + store double %19, double* %23, align 8 + store %Array* %target, 
%Array** %24, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %20, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %25 = add i64 %__qsVar1__idx__, %12 + br label %header__1 + +exit__1: ; preds = %header__1 + %26 = sub i64 %nSteps, 1 + %27 = sub i64 %26, 0 + %28 = sdiv i64 %27, 1 + %29 = mul i64 1, %28 + %30 = add i64 0, %29 + %31 = insertvalue %Range zeroinitializer, i64 %30, 0 + %32 = insertvalue %Range %31, i64 -1, 1 + %33 = insertvalue %Range %32, i64 0, 2 + %34 = extractvalue %Range %33, 0 + %35 = extractvalue %Range %33, 1 + %36 = extractvalue %Range %33, 2 + br label %preheader__2 + +preheader__2: ; preds = %exit__1 + %37 = icmp sgt i64 %35, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__2 + %__qsVar0__idx__ = phi i64 [ %34, %preheader__2 ], [ %48, %exiting__2 ] + %38 = icmp sle i64 %__qsVar0__idx__, %36 + %39 = icmp sge i64 %__qsVar0__idx__, %36 + %40 = select i1 %37, i1 %38, i1 %39 + br i1 %40, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %41 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %41, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %41) + %42 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %43 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %44 = bitcast %Tuple* %43 to { i64, double, %Array* }* + %45 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %44, i32 0, i32 0 + %46 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %44, i32 0, i32 1 + %47 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %44, i32 0, i32 2 + store i64 %__qsVar0__idx__, i64* %45, align 4 + store double %42, double* %46, align 8 + store %Array* %target, %Array** %47, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %41, %Tuple* %43, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %41, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %41, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %48 = add i64 %__qsVar0__idx__, %35 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr 
inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %order = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %4 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %stepSize = load double, double* %4, align 8 + %5 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 3 + %target = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %3, i32 0, i32 0 + %nSteps = load i64, i64* %6, align 4 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %3, i32 0, i32 1 + %op = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %8 = icmp sgt i64 %order, 2 + br i1 %8, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %stepSizeOuter = call double @Microsoft__Quantum__Canon____QsRef2__TrotterStepSize____body(i64 %order) + %9 = fmul double 4.000000e+00, %stepSizeOuter + %stepSizeInner = fsub double 1.000000e+00, %9 + %10 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i64, %Callable* }* + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 1 + store i64 %nSteps, i64* %13, align 4 + store %Callable* %op, %Callable** %14, align 8 + %15 = fmul double %stepSizeOuter, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { i64, { i64, %Callable* }*, double, %Array* }* + %18 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 3 + store i64 %10, i64* %18, align 4 + store { i64, %Callable* }* %12, { i64, %Callable* }** %19, align 8 + store double %15, double* %20, align 8 + store %Array* %target, %Array** %21, align 8 + call void 
@Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %17) + %22 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { i64, %Callable* }* + %25 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 1 + store i64 %nSteps, i64* %25, align 4 + store %Callable* %op, %Callable** %26, align 8 + %27 = fmul double %stepSizeOuter, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { i64, { i64, %Callable* }*, double, %Array* }* + %30 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 1 + %32 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 2 + %33 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 3 + store i64 %22, i64* %30, align 4 + store { i64, %Callable* }* %24, { i64, %Callable* }** %31, align 8 + store double %27, double* %32, align 8 + store %Array* %target, %Array** %33, align 8 + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %29) + %34 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %35 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %36 = bitcast %Tuple* %35 to { i64, %Callable* }* + %37 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %36, i32 0, i32 0 + %38 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %36, i32 0, i32 1 + store i64 %nSteps, i64* %37, align 4 + store %Callable* %op, %Callable** %38, align 8 + %39 = fmul double %stepSizeInner, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i64, { i64, %Callable* }*, double, %Array* }* + %42 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 0 + 
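; annotation, not compiler output: %41 packs (order - 2, (nSteps, op), stepSizeInner * stepSize, target) as the argument tuple for the controlled inner sub-step +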
%43 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 1 + %44 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 2 + %45 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 3 + store i64 %34, i64* %42, align 4 + store { i64, %Callable* }* %36, { i64, %Callable* }** %43, align 8 + store double %39, double* %44, align 8 + store %Array* %target, %Array** %45, align 8 + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %41) + %46 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { i64, %Callable* }* + %49 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %48, i32 0, i32 1 + store i64 %nSteps, i64* %49, align 4 + store %Callable* %op, %Callable** %50, align 8 + %51 = fmul double %stepSizeOuter, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %52 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %53 = bitcast %Tuple* %52 to { i64, { i64, %Callable* }*, double, %Array* }* + %54 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 0 + %55 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 1 + %56 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 2 + %57 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 3 + store i64 %46, i64* %54, align 4 + store { i64, %Callable* }* %48, { i64, %Callable* }** %55, align 8 + store double %51, double* %56, align 8 + store %Array* %target, %Array** %57, align 8 + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %53) + %58 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %59 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %60 = bitcast %Tuple* %59 to { i64, %Callable* }* + %61 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %60, i32 0, i32 0 + %62 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %60, i32 0, i32 1 + store i64 %nSteps, i64* %61, align 4 + store 
%Callable* %op, %Callable** %62, align 8 + %63 = fmul double %stepSizeOuter, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %64 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %65 = bitcast %Tuple* %64 to { i64, { i64, %Callable* }*, double, %Array* }* + %66 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 0 + %67 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 1 + %68 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 2 + %69 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 3 + store i64 %58, i64* %66, align 4 + store { i64, %Callable* }* %60, { i64, %Callable* }** %67, align 8 + store double %63, double* %68, align 8 + store %Array* %target, %Array** %69, align 8 + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %65) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %59, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %64, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %70 = icmp eq 
i64 %order, 2 + br i1 %70, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %71 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %72 = bitcast %Tuple* %71 to { i64, %Callable* }* + %73 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %72, i32 0, i32 0 + %74 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %72, i32 0, i32 1 + store i64 %nSteps, i64* %73, align 4 + store %Callable* %op, %Callable** %74, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %75 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %76 = bitcast %Tuple* %75 to { { i64, %Callable* }*, double, %Array* }* + %77 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 0 + %78 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 1 + %79 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 2 + store { i64, %Callable* }* %72, { i64, %Callable* }** %77, align 8 + store double %stepSize, double* %78, align 8 + store %Array* %target, %Array** %79, align 8 + call void @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %76) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %71, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %75, i32 -1) + br label %continue__1 + +else__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %80 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %81 = bitcast %Tuple* %80 to { i64, %Callable* }* + %82 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %81, i32 0, i32 0 + %83 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %81, i32 0, i32 1 + store i64 %nSteps, i64* %82, align 4 + store %Callable* %op, %Callable** %83, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %84 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %85 = bitcast %Tuple* %84 to { { i64, %Callable* }*, double, %Array* }* + %86 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 0 + %87 = getelementptr inbounds { { i64, 
%Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 1 + %88 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 2 + store { i64, %Callable* }* %81, { i64, %Callable* }** %86, align 8 + store double %stepSize, double* %87, align 8 + store %Array* %target, %Array** %88, align 8 + call void @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %85) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %3, align 8 + %4 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 0 + %nSteps = load i64, i64* %5, align 4 + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 1 + %op = load %Callable*, %Callable** %6, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %7 = sub i64 %nSteps, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] + %8 = icmp sle i64 %idx, %7 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %9) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %10 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ 
i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i64, double, %Array* }* + %13 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %12, i32 0, i32 2 + store i64 %idx, i64* %13, align 4 + store double %10, double* %14, align 8 + store %Array* %target, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { i64, double, %Array* }* }* + %18 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { i64, double, %Array* }* %12, { i64, double, %Array* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %16, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %21 = sub i64 %nSteps, 1 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idx__1 = phi i64 [ %21, %preheader__1 ], [ %36, %exiting__2 ] + %22 = icmp sle i64 %idx__1, 0 + %23 = icmp sge i64 %idx__1, 0 + %24 = select i1 false, i1 %22, i1 %23 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %25, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %25) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %26 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { i64, double, %Array* }* + %29 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %28, i32 0, i32 1 + %31 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %28, i32 0, i32 2 + store i64 %idx__1, i64* %29, align 4 + store double %26, double* %30, align 8 + store %Array* %target, %Array** %31, align 8 + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ 
%Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { %Array*, { i64, double, %Array* }* }* + %34 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %33, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %34, align 8 + store { i64, double, %Array* }* %28, { i64, double, %Array* }** %35, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %25, %Tuple* %32, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %25, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %25, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %36 = add i64 %idx__1, -1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %order = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %4 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %stepSize = load double, double* %4, align 8 + %5 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 3 + %target = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %3, i32 0, i32 0 + %nSteps = load i64, i64* %6, align 4 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %3, i32 0, i32 1 + %op = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %8 = icmp sgt i64 %order, 2 + br i1 %8, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %__qsVar0__stepSizeOuter__ = call double @Microsoft__Quantum__Canon____QsRef2__TrotterStepSize____body(i64 %order) + %9 = fmul double 4.000000e+00, %__qsVar0__stepSizeOuter__ + 
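; annotation, not compiler output: the next instruction computes stepSizeInner = 1 - 4 * stepSizeOuter, as in the body, adj and ctl variants +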
%__qsVar1__stepSizeInner__ = fsub double 1.000000e+00, %9 + %10 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i64, %Callable* }* + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 1 + store i64 %nSteps, i64* %13, align 4 + store %Callable* %op, %Callable** %14, align 8 + %15 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { i64, { i64, %Callable* }*, double, %Array* }* + %18 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 3 + store i64 %10, i64* %18, align 4 + store { i64, %Callable* }* %12, { i64, %Callable* }** %19, align 8 + store double %15, double* %20, align 8 + store %Array* %target, %Array** %21, align 8 + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %17) + %22 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { i64, %Callable* }* + %25 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 1 + store i64 %nSteps, i64* %25, align 4 + store %Callable* %op, %Callable** %26, align 8 + %27 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { i64, { i64, %Callable* }*, double, %Array* }* + %30 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, 
%Callable* }*, double, %Array* }* %29, i32 0, i32 1 + %32 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 2 + %33 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 3 + store i64 %22, i64* %30, align 4 + store { i64, %Callable* }* %24, { i64, %Callable* }** %31, align 8 + store double %27, double* %32, align 8 + store %Array* %target, %Array** %33, align 8 + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %29) + %34 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %35 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %36 = bitcast %Tuple* %35 to { i64, %Callable* }* + %37 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %36, i32 0, i32 0 + %38 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %36, i32 0, i32 1 + store i64 %nSteps, i64* %37, align 4 + store %Callable* %op, %Callable** %38, align 8 + %39 = fmul double %__qsVar1__stepSizeInner__, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i64, { i64, %Callable* }*, double, %Array* }* + %42 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 1 + %44 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 2 + %45 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 3 + store i64 %34, i64* %42, align 4 + store { i64, %Callable* }* %36, { i64, %Callable* }** %43, align 8 + store double %39, double* %44, align 8 + store %Array* %target, %Array** %45, align 8 + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %41) + %46 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { i64, %Callable* }* + %49 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %48, i32 0, i32 1 + store i64 %nSteps, i64* %49, align 4 + store %Callable* %op, %Callable** %50, align 8 + %51 = fmul double 
%__qsVar0__stepSizeOuter__, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %52 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %53 = bitcast %Tuple* %52 to { i64, { i64, %Callable* }*, double, %Array* }* + %54 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 0 + %55 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 1 + %56 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 2 + %57 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 3 + store i64 %46, i64* %54, align 4 + store { i64, %Callable* }* %48, { i64, %Callable* }** %55, align 8 + store double %51, double* %56, align 8 + store %Array* %target, %Array** %57, align 8 + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %53) + %58 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %59 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %60 = bitcast %Tuple* %59 to { i64, %Callable* }* + %61 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %60, i32 0, i32 0 + %62 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %60, i32 0, i32 1 + store i64 %nSteps, i64* %61, align 4 + store %Callable* %op, %Callable** %62, align 8 + %63 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %64 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %65 = bitcast %Tuple* %64 to { i64, { i64, %Callable* }*, double, %Array* }* + %66 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 0 + %67 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 1 + %68 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 2 + %69 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 3 + store i64 %58, i64* %66, align 4 + store { i64, %Callable* }* %60, { i64, %Callable* }** %67, align 8 + store double %63, double* %68, align 8 + store %Array* %target, %Array** %69, align 8 + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %65) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %59, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %64, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %70 = icmp eq i64 %order, 2 + br i1 %70, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %71 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %72 = bitcast %Tuple* %71 to { i64, %Callable* }* + %73 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %72, i32 0, i32 0 + %74 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %72, i32 0, i32 1 + store i64 %nSteps, i64* %73, align 4 + store %Callable* %op, %Callable** %74, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %75 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %76 = bitcast %Tuple* %75 to { { i64, %Callable* }*, double, %Array* }* + %77 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 0 + %78 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 1 + %79 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, 
{ { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 2 + store { i64, %Callable* }* %72, { i64, %Callable* }** %77, align 8 + store double %stepSize, double* %78, align 8 + store %Array* %target, %Array** %79, align 8 + call void @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %76) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %71, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %75, i32 -1) + br label %continue__1 + +; Remaining case: defer to the first-order Trotter formula (Trotter1ImplCA). +else__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %80 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %81 = bitcast %Tuple* %80 to { i64, %Callable* }* + %82 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %81, i32 0, i32 0 + %83 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %81, i32 0, i32 1 + store i64 %nSteps, i64* %82, align 4 + store %Callable* %op, %Callable** %83, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %84 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %85 = bitcast %Tuple* %84 to { { i64, %Callable* }*, double, %Array* }* + %86 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 0 + %87 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 1 + %88 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 2 + store { i64, %Callable* }* %81, { i64, %Callable* }** %86, align 8 + store double %stepSize, double* %87, align 8 + store %Array* %target, %Array** %88, align 8 + call void @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %85) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +; Controlled-adjoint form of the second-order Trotter step: two half-step (stepSize * 0.5) sweeps over the nSteps slice indices, one ascending and one descending, each invoking the adjoint controlled operation. +define internal void
@Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %3, align 8 + %4 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 0 + %nSteps = load i64, i64* %5, align 4 + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 1 + %op = load %Callable*, %Callable** %6, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %7 = sub i64 %nSteps, 1 + %8 = sub i64 0, %7 + %9 = sdiv i64 %8, -1 + %10 = mul i64 -1, %9 + %11 = add i64 %7, %10 + %12 = insertvalue %Range zeroinitializer, i64 %11, 0 + %13 = insertvalue %Range %12, i64 1, 1 + %14 = insertvalue %Range %13, i64 %7, 2 + %15 = extractvalue %Range %14, 0 + %16 = extractvalue %Range %14, 1 + %17 = extractvalue %Range %14, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %18 = icmp sgt i64 %16, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar1__idx__ = phi i64 [ %15, %preheader__1 ], [ %33, %exiting__1 ] + %19 = icmp sle i64 %__qsVar1__idx__, %17 + %20 = icmp sge i64 %__qsVar1__idx__, %17 + %21 = select i1 %18, i1 %19, i1 %20 + br i1 %21, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %23 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { i64, double, %Array* }* + %26 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %25, i32 0, i32 1 + %28 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %25, i32 0, i32 2 + store i64 %__qsVar1__idx__, i64* %26, align 4 + store double %23, double* %27, align 8 + store %Array* %target, %Array** %28, align 8 + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %30 = bitcast 
%Tuple* %29 to { %Array*, { i64, double, %Array* }* }* + %31 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %30, i32 0, i32 0 + %32 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %30, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %31, align 8 + store { i64, double, %Array* }* %25, { i64, double, %Array* }** %32, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %29, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %33 = add i64 %__qsVar1__idx__, %16 + br label %header__1 + +exit__1: ; preds = %header__1 + %34 = sub i64 %nSteps, 1 + %35 = sub i64 %34, 0 + %36 = sdiv i64 %35, 1 + %37 = mul i64 1, %36 + %38 = add i64 0, %37 + %39 = insertvalue %Range zeroinitializer, i64 %38, 0 + %40 = insertvalue %Range %39, i64 -1, 1 + %41 = insertvalue %Range %40, i64 0, 2 + %42 = extractvalue %Range %41, 0 + %43 = extractvalue %Range %41, 1 + %44 = extractvalue %Range %41, 2 + br label %preheader__2 + +preheader__2: ; preds = %exit__1 + %45 = icmp sgt i64 %43, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__2 + %__qsVar0__idx__ = phi i64 [ %42, %preheader__2 ], [ %60, %exiting__2 ] + %46 = icmp sle i64 %__qsVar0__idx__, %44 + %47 = icmp sge i64 %__qsVar0__idx__, %44 + %48 = select i1 %45, i1 %46, i1 %47 + br i1 %48, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %49 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %49, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %49) + call void @__quantum__rt__callable_make_controlled(%Callable* %49) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %50 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %51 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %52 = bitcast %Tuple* %51 to { i64, double, %Array* }* + %53 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %52, i32 0, i32 0 + %54 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %52, i32 0, i32 1 + %55 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %52, i32 0, i32 2 + store i64 %__qsVar0__idx__, i64* %53, align 4 + store double %50, double* %54, align 8 + store %Array* %target, %Array** %55, align 8 + %56 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %57 = bitcast %Tuple* %56 to { %Array*, { i64, double, %Array* }* }* + %58 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, 
double, %Array* }* %57, i32 0, i32 0 + %59 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %57, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %58, align 8 + store { i64, double, %Array* }* %52, { i64, double, %Array* }** %59, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %49, %Tuple* %56, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %49, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %49, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %51, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %60 = add i64 %__qsVar0__idx__, %43 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +; IntAsBoolArray: expands %number into a little-endian (least-significant bit first) array of %bits Booleans; the Fact assertions below guard the supported ranges of %bits and %number. +define internal %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %number, i64 %bits) { +entry: + %tempInt = alloca i64, align 8 + %outputBits = alloca %Array*, align 8 + %0 = icmp sge i64 %bits, 0 + br i1 %0, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %entry + %1 = icmp sle i64 %bits, 63 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %entry + %2 = phi i1 [ %1, %condTrue__1 ], [ %0, %entry ] + %3 = trunc i64 %bits to i32 + %4 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %3) + %5 = fptosi double %4 to i64 + %6 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([33 x i8], [33 x i8]* @19, i32 0, i32 0)) + %7 = call %String* @__quantum__rt__int_to_string(i64 %5) + %8 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %7) + call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %8) + %9 = icmp slt i64 %bits, 63 + br i1 %9, label %condTrue__2, label %condFalse__1 + +condTrue__2: ; preds = %condContinue__1 + %10 = shl i64 1, %bits + br label %condContinue__2 + +condFalse__1: ; preds = %condContinue__1 + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__1, %condTrue__2 + %max = phi i64 [ %10, %condTrue__2 ], [ 9223372036854775807, %condFalse__1 ] + %11 = icmp sge i64 %number, 0 + br i1 %11, label %condTrue__3, label %condContinue__3 + +condTrue__3: ; preds = %condContinue__2 + %12 = icmp sle i64 %number, %max + br label %condContinue__3 + +condContinue__3: ; preds = %condTrue__3, %condContinue__2 + %13 = phi i1 [ %12, %condTrue__3 ], [ %11, %condContinue__2 ] + %14 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([34 x i8], [34 x i8]* @20, i32 0, i32 0)) + %15 = call %String* @__quantum__rt__int_to_string(i64 %bits) + %16 = call %String* @__quantum__rt__string_concatenate(%String* %14, %String* %15) + call void @__quantum__rt__string_update_reference_count(%String* %14, i32 -1) + call void
@__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + %17 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @21, i32 0, i32 0)) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__int_to_string(i64 %number) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + %21 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @12, i32 0, i32 0)) + %22 = call %String* @__quantum__rt__string_concatenate(%String* %20, %String* %21) + call void @__quantum__rt__string_update_reference_count(%String* %20, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %21, i32 -1) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %13, %String* %22) + %23 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %bits) + %24 = sub i64 %bits, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %condContinue__3 + %25 = phi i64 [ 0, %condContinue__3 ], [ %29, %exiting__1 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %25) + %28 = bitcast i8* %27 to i1* + store i1 false, i1* %28, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %29 = add i64 %25, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %23, %Array** %outputBits, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + store i64 %number, i64* %tempInt, align 4 + %30 = sub i64 %bits, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idxBit = phi i64 [ 0, %exit__1 ], [ %41, %exiting__2 ] + %31 = icmp sle i64 %idxBit, %30 + br i1 %31, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %32 = load %Array*, %Array** %outputBits, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = load i64, i64* %tempInt, align 4 + %35 = srem i64 %34, 2 + %36 = icmp eq i64 %35, 0 + %37 = select i1 %36, i1 false, i1 true + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxBit) + %39 = bitcast i8* %38 to i1* + store i1 %37, i1* %39, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %outputBits, align 8 + %40 = sdiv i64 %34, 2 + store i64 %40, i64* %tempInt, align 4 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %41 = add i64 %idxBit, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %42 = load %Array*, %Array** %outputBits, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %22, i32 -1) + ret %Array* %42 +} + +define internal %Callable* 
@Microsoft__Quantum__Canon___8fb3402632744afcb503d18268c290e7_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___9de2894b265e48a892179ea0586e5387_ApplyControlledOnBitString__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Callable* }* getelementptr ({ %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Array*, %Callable* }* + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store %Array* %bits, %Array** %4, align 8 + store %Callable* %oracle, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__18__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__8__FunctionTable, %Tuple* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + ret %Callable* %6 +} + +define internal %Callable* @Microsoft__Quantum__Canon___9a30fce349964e6ba551089dfee89481_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___fa6244d0f10748a4b57c7438a03da38e_ApplyControlledOnBitString__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Callable* }* getelementptr ({ %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Array*, %Callable* }* + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 
0, i32 1 + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store %Array* %bits, %Array** %4, align 8 + store %Callable* %oracle, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__19__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__9__FunctionTable, %Tuple* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + ret %Callable* %6 +} + +; ApplyToEachCA (body): walks the register's IndexRange, invoking %singleElementOperation once per qubit. +define internal void @Microsoft__Quantum__Canon___9b30dd7f42984964b38a96887fa617c5_ApplyToEachCA__body(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call %Range @Microsoft__Quantum__Arrays___cc41c4cd834643d1b4bd5cfbd88b1f31_IndexRange__body(%Array* %register) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %4 = icmp sgt i64 %2, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxQubit = phi i64 [ %1, %preheader__1 ], [ %14, %exiting__1 ] + %5 = icmp sle i64 %idxQubit, %3 + %6 = icmp sge i64 %idxQubit, %3 + %7 = select i1 %4, i1 %5, i1 %6 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Qubit* }* + %13 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %12, i32 0, i32 0 + store %Qubit* %10, %Qubit** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %singleElementOperation, %Tuple* %11, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %14 = add i64 %idxQubit, %2 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +; ApplyToEachCA (adjoint): the same walk with the index range reversed, invoking the adjoint of the operation. +define internal void @Microsoft__Quantum__Canon___9b30dd7f42984964b38a96887fa617c5_ApplyToEachCA__adj(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call %Range @Microsoft__Quantum__Arrays___cc41c4cd834643d1b4bd5cfbd88b1f31_IndexRange__body(%Array* %register) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 =
extractvalue %Range %0, 2 + %4 = sub i64 %3, %1 + %5 = sdiv i64 %4, %2 + %6 = mul i64 %2, %5 + %7 = add i64 %1, %6 + %8 = sub i64 0, %2 + %9 = insertvalue %Range zeroinitializer, i64 %7, 0 + %10 = insertvalue %Range %9, i64 %8, 1 + %11 = insertvalue %Range %10, i64 %1, 2 + %12 = extractvalue %Range %11, 0 + %13 = extractvalue %Range %11, 1 + %14 = extractvalue %Range %11, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %15 = icmp sgt i64 %13, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %12, %preheader__1 ], [ %26, %exiting__1 ] + %16 = icmp sle i64 %__qsVar0__idxQubit__, %14 + %17 = icmp sge i64 %__qsVar0__idxQubit__, %14 + %18 = select i1 %15, i1 %16, i1 %17 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %19) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %21 = bitcast i8* %20 to %Qubit** + %22 = load %Qubit*, %Qubit** %21, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Qubit* }* + %25 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %24, i32 0, i32 0 + store %Qubit* %22, %Qubit** %25, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %19, %Tuple* %23, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %26 = add i64 %__qsVar0__idxQubit__, %13 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___9b30dd7f42984964b38a96887fa617c5_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %3 = call %Range @Microsoft__Quantum__Arrays___cc41c4cd834643d1b4bd5cfbd88b1f31_IndexRange__body(%Array* %register) + %4 = extractvalue %Range %3, 0 + %5 = extractvalue %Range %3, 1 + %6 = extractvalue %Range %3, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %7 = icmp sgt i64 %5, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, 
%preheader__1 + %idxQubit = phi i64 [ %4, %preheader__1 ], [ %19, %exiting__1 ] + %8 = icmp sle i64 %idxQubit, %6 + %9 = icmp sge i64 %idxQubit, %6 + %10 = select i1 %7, i1 %8, i1 %9 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, %Qubit* }* + %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %16, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %17, align 8 + store %Qubit* %14, %Qubit** %18, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %15, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %idxQubit, %5 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___9b30dd7f42984964b38a96887fa617c5_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %3 = call %Range @Microsoft__Quantum__Arrays___cc41c4cd834643d1b4bd5cfbd88b1f31_IndexRange__body(%Array* %register) + %4 = extractvalue %Range %3, 0 + %5 = extractvalue %Range %3, 1 + %6 = extractvalue %Range %3, 2 + %7 = sub i64 %6, %4 + %8 = sdiv i64 %7, %5 + %9 = mul i64 %5, %8 + %10 = add i64 %4, %9 + %11 = sub i64 0, %5 + %12 = insertvalue %Range zeroinitializer, i64 %10, 0 + %13 = insertvalue %Range %12, i64 %11, 1 + %14 = insertvalue %Range %13, i64 %4, 2 + 
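; The %Range just assembled reverses the register walk for the controlled-adjoint specialization: start at the last reachable index, negate the step, and end at the original start. +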
%15 = extractvalue %Range %14, 0 + %16 = extractvalue %Range %14, 1 + %17 = extractvalue %Range %14, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %18 = icmp sgt i64 %16, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %15, %preheader__1 ], [ %30, %exiting__1 ] + %19 = icmp sle i64 %__qsVar0__idxQubit__, %17 + %20 = icmp sge i64 %__qsVar0__idxQubit__, %17 + %21 = select i1 %18, i1 %19, i1 %20 + br i1 %21, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %24 = bitcast i8* %23 to %Qubit** + %25 = load %Qubit*, %Qubit** %24, align 8 + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Array*, %Qubit* }* + %28 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %27, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %28, align 8 + store %Qubit* %25, %Qubit** %29, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %26, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %30 = add i64 %__qsVar0__idxQubit__, %16 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__18__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 1 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 
ptrtoint ({ %Array*, %Callable*, %Array*, %Qubit* }* getelementptr ({ %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Callable*, %Array*, %Qubit* }* + %12 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 3 + store %Array* %2, %Array** %12, align 8 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__18__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 1 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Qubit* }* getelementptr ({ %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Callable*, %Array*, %Qubit* }* + %12 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 3 + store %Array* %2, %Array** %12, align 8 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 
1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__18__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Qubit* }*, { %Array*, %Qubit* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Qubit* }* getelementptr ({ %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Callable*, %Array*, %Qubit* }* + %16 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 3 + store %Array* %7, %Array** %16, align 8 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Qubit* %13, %Qubit** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* + %22 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { %Array*, %Callable*, %Array*, %Qubit* }* %15, { %Array*, %Callable*, %Array*, %Qubit* }** %23, align 8 + %24 = 
getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__18__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Qubit* }*, { %Array*, %Qubit* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Qubit* }* getelementptr ({ %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Callable*, %Array*, %Qubit* }* + %16 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 3 + store %Array* %7, %Array** %16, align 8 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Qubit* %13, %Qubit** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* + %22 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { 
%Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { %Array*, %Callable*, %Array*, %Qubit* }* %15, { %Array*, %Callable*, %Array*, %Qubit* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___9de2894b265e48a892179ea0586e5387_ApplyControlledOnBitString__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Callable*, %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %5 = load %Array*, %Array** %1, align 8 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon___9de2894b265e48a892179ea0586e5387_ApplyControlledOnBitString__body(%Array* %5, %Callable* %6, %Array* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___9de2894b265e48a892179ea0586e5387_ApplyControlledOnBitString__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Callable*, %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %5 = load %Array*, %Array** %1, align 8 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void 
@Microsoft__Quantum__Canon___9de2894b265e48a892179ea0586e5387_ApplyControlledOnBitString__adj(%Array* %5, %Callable* %6, %Array* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___9de2894b265e48a892179ea0586e5387_ApplyControlledOnBitString__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Callable*, %Array*, %Qubit* }*, { %Array*, %Callable*, %Array*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon___9de2894b265e48a892179ea0586e5387_ApplyControlledOnBitString__ctl(%Array* %3, { %Array*, %Callable*, %Array*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___9de2894b265e48a892179ea0586e5387_ApplyControlledOnBitString__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Callable*, %Array*, %Qubit* }*, { %Array*, %Callable*, %Array*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon___9de2894b265e48a892179ea0586e5387_ApplyControlledOnBitString__ctladj(%Array* %3, { %Array*, %Callable*, %Array*, %Qubit* }* %4) + ret void +} + +; Memory-management hook for the ControlledOnBitString capture tuple { %Callable*, %Array*, %Callable* }: applies %count-change to each captured value and then to the tuple itself. +define internal void @MemoryManagement__8__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__8__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds {
%Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__19__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 1 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Array* }* getelementptr ({ %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Callable*, %Array*, %Array* }* + %12 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 3 + store %Array* %2, %Array** %12, align 8 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__19__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, 
%Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 1 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Array* }* getelementptr ({ %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Callable*, %Array*, %Array* }* + %12 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 3 + store %Array* %2, %Array** %12, align 8 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__19__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, 
{ %Array*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Array* }* getelementptr ({ %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Callable*, %Array*, %Array* }* + %16 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 3 + store %Array* %7, %Array** %16, align 8 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { %Array*, %Callable*, %Array*, %Array* }* %15, { %Array*, %Callable*, %Array*, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__19__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 1 + 
%7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Array* }* getelementptr ({ %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Callable*, %Array*, %Array* }* + %16 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 3 + store %Array* %7, %Array** %16, align 8 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { %Array*, %Callable*, %Array*, %Array* }* %15, { %Array*, %Callable*, %Array*, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___fa6244d0f10748a4b57c7438a03da38e_ApplyControlledOnBitString__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Callable*, %Array*, %Array* }* + %1 = getelementptr 
inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %5 = load %Array*, %Array** %1, align 8 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___fa6244d0f10748a4b57c7438a03da38e_ApplyControlledOnBitString__body(%Array* %5, %Callable* %6, %Array* %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___fa6244d0f10748a4b57c7438a03da38e_ApplyControlledOnBitString__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %5 = load %Array*, %Array** %1, align 8 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___fa6244d0f10748a4b57c7438a03da38e_ApplyControlledOnBitString__adj(%Array* %5, %Callable* %6, %Array* %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___fa6244d0f10748a4b57c7438a03da38e_ApplyControlledOnBitString__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Callable*, %Array*, %Array* }*, { %Array*, %Callable*, %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___fa6244d0f10748a4b57c7438a03da38e_ApplyControlledOnBitString__ctl(%Array* %3, { %Array*, %Callable*, %Array*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___fa6244d0f10748a4b57c7438a03da38e_ApplyControlledOnBitString__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, 
%Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Callable*, %Array*, %Array* }*, { %Array*, %Callable*, %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___fa6244d0f10748a4b57c7438a03da38e_ApplyControlledOnBitString__ctladj(%Array* %3, { %Array*, %Callable*, %Array*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__9__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__9__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___c1d04641885042dab9d08faf4f56c426_DecomposedIntoTimeStepsCA__body({ i64, %Callable* }* %0, i64 %trotterOrder) { +entry: + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = icmp eq i64 %trotterOrder, 1 + br i1 %3, label %then0__1, label 
%test1__1 + +then0__1: ; preds = %entry + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Callable* }* getelementptr ({ %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, i64, %Callable* }* + %7 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %6, i32 0, i32 2 + store %Callable* %4, %Callable** %7, align 8 + store i64 %nSteps, i64* %8, align 4 + store %Callable* %op, %Callable** %9, align 8 + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__20__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__10__FunctionTable, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret %Callable* %10 + +test1__1: ; preds = %entry + %11 = icmp eq i64 %trotterOrder, 2 + br i1 %11, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Callable* }* getelementptr ({ %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Callable*, i64, %Callable* }* + %15 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %14, i32 0, i32 0 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %14, i32 0, i32 1 + %17 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %14, i32 0, i32 2 + store %Callable* %12, %Callable** %15, align 8 + store i64 %nSteps, i64* %16, align 4 + store %Callable* %op, %Callable** %17, align 8 + %18 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__21__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__10__FunctionTable, %Tuple* %13) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret %Callable* %18 + +test2__1: ; preds = %test1__1 + %19 = srem i64 %trotterOrder, 2 + %20 = icmp eq i64 %19, 0 + br i1 %20, label %then2__1, label %else__1 + +then2__1: ; preds = %test2__1 + %21 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* 
@Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, i64, %Callable* }* getelementptr ({ %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* null, i32 1) to i64)) + %23 = bitcast %Tuple* %22 to { %Callable*, i64, i64, %Callable* }* + %24 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %23, i32 0, i32 0 + %25 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %23, i32 0, i32 1 + %26 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %23, i32 0, i32 2 + %27 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %23, i32 0, i32 3 + store %Callable* %21, %Callable** %24, align 8 + store i64 %trotterOrder, i64* %25, align 4 + store i64 %nSteps, i64* %26, align 4 + store %Callable* %op, %Callable** %27, align 8 + %28 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__22__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__11__FunctionTable, %Tuple* %22) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret %Callable* %28 + +else__1: ; preds = %test2__1 + %29 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @14, i32 0, i32 0)) + %30 = call %String* @__quantum__rt__int_to_string(i64 %trotterOrder) + %31 = call %String* @__quantum__rt__string_concatenate(%String* %29, %String* %30) + call void @__quantum__rt__string_update_reference_count(%String* %29, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %30, i32 -1) + %32 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([20 x i8], [20 x i8]* @15, i32 0, i32 0)) + %33 = call %String* @__quantum__rt__string_concatenate(%String* %31, %String* %32) + call void @__quantum__rt__string_update_reference_count(%String* %31, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %32, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__fail(%String* %33) + unreachable + +continue__1: ; No predecessors! 
+ unreachable +} + +define internal void @Lifted__PartialApplication__20__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Callable* }* + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + store i64 %2, i64* %7, align 4 + store %Callable* %4, %Callable** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { i64, %Callable* }*, double, %Array* }* + %16 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 2 + store { i64, %Callable* }* %6, { i64, %Callable* }** %16, align 8 + store double %11, double* %17, align 8 + store %Array* %13, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %14, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__20__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Callable* }* + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + 
store i64 %2, i64* %7, align 4 + store %Callable* %4, %Callable** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { i64, %Callable* }*, double, %Array* }* + %16 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 2 + store { i64, %Callable* }* %6, { i64, %Callable* }** %16, align 8 + store double %11, double* %17, align 8 + store %Array* %13, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %14, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__20__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable* }* + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + store i64 %7, i64* %12, align 4 + store %Callable* %9, %Callable** %13, align 8 + %14 = getelementptr inbounds { double, 
%Array* }, { double, %Array* }* %4, i32 0, i32 0 + %15 = load double, double* %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { { i64, %Callable* }*, double, %Array* }* + %20 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 2 + store { i64, %Callable* }* %11, { i64, %Callable* }** %20, align 8 + store double %15, double* %21, align 8 + store %Array* %17, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { { i64, %Callable* }*, double, %Array* }* %19, { { i64, %Callable* }*, double, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__20__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, 
i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable* }* + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + store i64 %7, i64* %12, align 4 + store %Callable* %9, %Callable** %13, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %15 = load double, double* %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { { i64, %Callable* }*, double, %Array* }* + %20 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 2 + store { i64, %Callable* }* %11, { i64, %Callable* }** %20, align 8 + store double %15, double* %21, align 8 + store %Array* %17, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { { i64, %Callable* }*, double, %Array* }* %19, { { i64, %Callable* }*, double, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %29) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) 
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____body({ i64, %Callable* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____adj({ i64, %Callable* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, double, %Array* }*, { { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____ctl(%Array* %3, { { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { 
%Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, double, %Array* }*, { { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___8b00e26b9e574173ad5506567afb5dcb___QsRef2__Trotter1ImplCA____ctladj(%Array* %3, { { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__10__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__10__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__21__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Callable* }* + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + store i64 %2, i64* %7, align 4 + store %Callable* %4, %Callable** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, 
%Array* }* %9, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { i64, %Callable* }*, double, %Array* }* + %16 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 2 + store { i64, %Callable* }* %6, { i64, %Callable* }** %16, align 8 + store double %11, double* %17, align 8 + store %Array* %13, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %14, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__21__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Callable* }* + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + store i64 %2, i64* %7, align 4 + store %Callable* %4, %Callable** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { i64, %Callable* }*, double, %Array* }* + %16 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 2 + store { i64, %Callable* 
}* %6, { i64, %Callable* }** %16, align 8 + store double %11, double* %17, align 8 + store %Array* %13, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %14, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__21__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable* }* + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + store i64 %7, i64* %12, align 4 + store %Callable* %9, %Callable** %13, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %15 = load double, double* %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { { i64, %Callable* }*, double, %Array* }* + %20 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 2 + store { i64, %Callable* }* %11, { i64, %Callable* }** %20, align 8 + store double %15, double* %21, align 8 + store %Array* %17, %Array** %22, align 8 + %23 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { { i64, %Callable* }*, double, %Array* }* %19, { { i64, %Callable* }*, double, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__21__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable* }* + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + store i64 %7, i64* %12, align 4 + store %Callable* %9, %Callable** %13, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %15 = load double, double* %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, 
%Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { { i64, %Callable* }*, double, %Array* }* + %20 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 2 + store { i64, %Callable* }* %11, { i64, %Callable* }** %20, align 8 + store double %15, double* %21, align 8 + store %Array* %17, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { { i64, %Callable* }*, double, %Array* }* %19, { { i64, %Callable* }*, double, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %29) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____body({ i64, %Callable* }* %4, double %5, %Array* %6) + ret void +} + 
+define internal void @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____adj({ i64, %Callable* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, double, %Array* }*, { { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____ctl(%Array* %3, { { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, double, %Array* }*, { { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___077dcc9977a341d8a21af45bf15c79a9___QsRef2__Trotter2ImplCA____ctladj(%Array* %3, { { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @Lifted__PartialApplication__22__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* 
}* %0, i32 0, i32 3 + %6 = load %Callable*, %Callable** %5, align 8 + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { i64, %Callable* }* + %9 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %8, i32 0, i32 1 + store i64 %4, i64* %9, align 4 + store %Callable* %6, %Callable** %10, align 8 + %11 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %11, i32 0, i32 0 + %13 = load double, double* %12, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %11, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { i64, { i64, %Callable* }*, double, %Array* }* + %18 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 3 + store i64 %2, i64* %18, align 4 + store { i64, %Callable* }* %8, { i64, %Callable* }** %19, align 8 + store double %13, double* %20, align 8 + store %Array* %15, %Array** %21, align 8 + %22 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %16, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__22__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 3 + %6 = load %Callable*, %Callable** %5, align 8 + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { i64, %Callable* }* + %9 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %8, i32 0, i32 1 + store i64 %4, i64* %9, align 4 + store %Callable* 
%6, %Callable** %10, align 8 + %11 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %11, i32 0, i32 0 + %13 = load double, double* %12, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %11, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { i64, { i64, %Callable* }*, double, %Array* }* + %18 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 3 + store i64 %2, i64* %18, align 4 + store { i64, %Callable* }* %8, { i64, %Callable* }** %19, align 8 + store double %13, double* %20, align 8 + store %Array* %15, %Array** %21, align 8 + %22 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %16, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__22__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 3 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* 
null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, %Callable* }* + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1 + store i64 %9, i64* %14, align 4 + store %Callable* %11, %Callable** %15, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %17 = load double, double* %16, align 8 + %18 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %19 = load %Array*, %Array** %18, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { i64, { i64, %Callable* }*, double, %Array* }* + %22 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 3 + store i64 %7, i64* %22, align 4 + store { i64, %Callable* }* %13, { i64, %Callable* }** %23, align 8 + store double %17, double* %24, align 8 + store %Array* %19, %Array** %25, align 8 + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* + %28 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %27, i32 0, i32 1 + store %Array* %3, %Array** %28, align 8 + store { i64, { i64, %Callable* }*, double, %Array* }* %21, { i64, { i64, %Callable* }*, double, %Array* }** %29, align 8 + %30 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 0 + %31 = load %Callable*, %Callable** %30, align 8 + %32 = call %Callable* @__quantum__rt__callable_copy(%Callable* %31, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %32) + call void @__quantum__rt__callable_invoke(%Callable* %32, %Tuple* %26, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %32, i32 -1) + ret void +} + +define internal void 
@Lifted__PartialApplication__22__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 3 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, %Callable* }* + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1 + store i64 %9, i64* %14, align 4 + store %Callable* %11, %Callable** %15, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %17 = load double, double* %16, align 8 + %18 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %19 = load %Array*, %Array** %18, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { i64, { i64, %Callable* }*, double, %Array* }* + %22 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 3 + store i64 %7, i64* %22, align 4 + store { i64, %Callable* }* %13, { i64, %Callable* }** %23, align 8 + store double %17, double* %24, align 8 + store %Array* %19, %Array** %25, align 8 + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* + %28 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Array*, { i64, { 
i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %27, i32 0, i32 1 + store %Array* %3, %Array** %28, align 8 + store { i64, { i64, %Callable* }*, double, %Array* }* %21, { i64, { i64, %Callable* }*, double, %Array* }** %29, align 8 + %30 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 0 + %31 = load %Callable*, %Callable** %30, align 8 + %32 = call %Callable* @__quantum__rt__callable_copy(%Callable* %31, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %32) + call void @__quantum__rt__callable_make_controlled(%Callable* %32) + call void @__quantum__rt__callable_invoke(%Callable* %32, %Tuple* %26, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %32, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %7 = load double, double* %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____body(i64 %5, { i64, %Callable* }* %6, double %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %7 = load double, double* %3, align 8 + %8 = load %Array*, %Array** %4, 
align 8 + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____adj(i64 %5, { i64, %Callable* }* %6, double %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, { i64, %Callable* }*, double, %Array* }*, { i64, { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____ctl(%Array* %3, { i64, { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, { i64, %Callable* }*, double, %Array* }*, { i64, { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___6e560be9a2374223aac5746c66773998___QsRef2__TrotterArbitraryImplCA____ctladj(%Array* %3, { i64, { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__11__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 3 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__11__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 0 + %2 = 
load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 3 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +declare %String* @__quantum__rt__int_to_string(i64) + +define internal i64 @Microsoft__Quantum__Math__MinI__body(i64 %a, i64 %b) { +entry: + %0 = icmp slt i64 %a, %b + %1 = select i1 %0, i64 %a, i64 %b + ret i64 %1 +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation____QsRef2___AddGeneratorSystems____body(i64 %idxTerm, i64 %nTermsA, i64 %nTermsB, %Callable* %generatorIndexFunctionA, %Callable* %generatorIndexFunctionB) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 1) + %0 = icmp slt i64 %idxTerm, %nTermsA + br i1 %0, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { i64 }* + %3 = getelementptr inbounds { i64 }, { i64 }* %2, i32 0, i32 0 + store i64 %idxTerm, i64* %3, align 4 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %generatorIndexFunctionA, %Tuple* %1, %Tuple* %4) + %5 = bitcast %Tuple* %4 to { { { %Array*, %Array* }*, %Array* }* }* + %6 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 0 + %7 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %6, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %7 + +else__1: ; preds = %entry + %8 = sub i64 %idxTerm, %nTermsA + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { i64 }* + %11 = getelementptr inbounds { i64 }, { i64 }* %10, i32 0, i32 0 + store i64 %8, i64* %11, align 4 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint 
({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %generatorIndexFunctionB, %Tuple* %9, %Tuple* %12) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %15 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %14, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %15 + +continue__1: ; No predecessors! + unreachable +} + +define internal void @Microsoft__Quantum__Simulation____QsRef2__TrotterSimulationAlgorithmImpl____body(double %trotterStepSize, i64 %trotterOrder, double %maxTime, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %1 = load { %Callable* }*, { %Callable* }** %0, align 8 + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + %3 = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 1) + %4 = bitcast { %Callable* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %5, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { i64, %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %11 = fdiv double %maxTime, %trotterStepSize + %nTimeSlices = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %11) + %12 = sitofp i64 %nTimeSlices to double + %resizedTrotterStepSize = fdiv double %maxTime, %12 + %13 = sub i64 %nTimeSlices, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxTimeSlice = phi i64 [ 0, %entry ], [ %19, %exiting__1 ] + %14 = icmp sle i64 %idxTimeSlice, %13 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, 
{ i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %resizedTrotterStepSize) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array* }* + %18 = getelementptr inbounds { %Array* }, { %Array* }* %17, i32 0, i32 0 + store %Array* %qubits, %Array** %18, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %16, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %idxTimeSlice, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal i64 @Microsoft__Quantum__Math__Ceiling__body(double %value) { +entry: + %0 = call { i64, double, i1 }* @Microsoft__Quantum__Math____QsRef1__ExtendedTruncation____body(double %value) + %1 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %0, i32 0, i32 0 + %truncated = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %0, i32 0, i32 1 + %remainder = load double, double* %2, align 8 + %3 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %0, i32 0, i32 2 + %isPositive = load i1, i1* %3, align 1 + %4 = call double @Microsoft__Quantum__Math__AbsD__body(double %remainder) + %5 = fcmp ole double %4, 1.000000e-15 + br i1 %5, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %6 = bitcast { i64, double, i1 }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret i64 %truncated + +else__1: ; preds = %entry + br i1 %isPositive, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %7 = add i64 %truncated, 1 + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %8 = phi i64 [ %7, %condTrue__1 ], [ %truncated, %condFalse__1 ] + %9 = bitcast { i64, double, i1 }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret i64 %8 + +continue__1: ; No predecessors! 
+ unreachable +} + +define internal %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %trotterStepSize) { +entry: + %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %evolutionSet = load { %Callable* }*, { %Callable* }** %0, align 8 + %1 = getelementptr inbounds { %Callable* }, { %Callable* }* %evolutionSet, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + %3 = bitcast { %Callable* }* %evolutionSet to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %generatorSystem = load { i64, %Callable* }*, { i64, %Callable* }** %4, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %generatorSystemFunction = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + %6 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %8, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation____QsRef2__TrotterStepImpl____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* getelementptr 
({ %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %12 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %11, i32 0, i32 1 + store %Callable* %9, %Callable** %12, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, { { %Callable* }*, { i64, %Callable* }* }** %13, align 8 + %14 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__25__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__14__FunctionTable, %Tuple* %10) + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %trotterForm = bitcast %Tuple* %15 to { i64, %Callable* }* + %16 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %trotterForm, i32 0, i32 0 + %17 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %trotterForm, i32 0, i32 1 + store i64 %nTerms, i64* %16, align 4 + store %Callable* %14, %Callable** %17, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %14, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %18 = call %Callable* @Microsoft__Quantum__Canon___c1d04641885042dab9d08faf4f56c426_DecomposedIntoTimeStepsCA__body({ i64, %Callable* }* %trotterForm, i64 %trotterOrder) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { %Callable*, double }* + %21 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %20, i32 0, i32 1 + store %Callable* %18, %Callable** %21, align 8 + store double %trotterStepSize, double* %22, align 8 + %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__26__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__15__FunctionTable, %Tuple* %19) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + ret %Callable* %23 +} + +define internal void @Microsoft__Quantum__Simulation____QsRef2__TrotterSimulationAlgorithmImpl____adj(double %trotterStepSize, i64 %trotterOrder, double %maxTime, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %1 = load { %Callable* }*, { %Callable* }** %0, align 8 + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + %3 = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 1) + %4 = bitcast { %Callable* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %5, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { i64, %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %11 = fdiv double %maxTime, %trotterStepSize + %__qsVar0__nTimeSlices__ = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %11) + %12 = sitofp i64 %__qsVar0__nTimeSlices__ to double + %__qsVar1__resizedTrotterStepSize__ = fdiv double %maxTime, %12 + %13 = sub i64 %__qsVar0__nTimeSlices__, 1 + %14 = sub i64 %13, 0 + %15 = sdiv i64 %14, 1 + %16 = mul i64 1, %15 + %17 = add i64 0, %16 + %18 = insertvalue %Range zeroinitializer, i64 %17, 0 + %19 = insertvalue %Range %18, i64 -1, 1 + %20 = insertvalue %Range %19, i64 0, 2 + %21 = extractvalue %Range %20, 0 + %22 = extractvalue %Range %20, 1 + %23 = extractvalue %Range %20, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %24 = icmp sgt i64 %22, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar2__idxTimeSlice__ = phi i64 [ %21, %preheader__1 ], [ %33, %exiting__1 ] + %25 = icmp sle i64 %__qsVar2__idxTimeSlice__, %23 + %26 = icmp sge i64 
%__qsVar2__idxTimeSlice__, %23 + %27 = select i1 %24, i1 %25, i1 %26 + br i1 %27, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %28 = call %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %__qsVar1__resizedTrotterStepSize__) + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %29) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %31 = bitcast %Tuple* %30 to { %Array* }* + %32 = getelementptr inbounds { %Array* }, { %Array* }* %31, i32 0, i32 0 + store %Array* %qubits, %Array** %32, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %30, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %28, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %33 = add i64 %__qsVar2__idxTimeSlice__, %22 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef2__TrotterSimulationAlgorithmImpl____ctl(%Array* %__controlQubits__, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 0 + %trotterStepSize = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 1 + %trotterOrder = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 2 + %maxTime = load double, double* %3, align 8 + %4 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 3 + %evolutionGenerator = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** 
%4, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 1) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 4 + %qubits = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %17 = fdiv double %maxTime, %trotterStepSize + %nTimeSlices = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %17) + %18 = sitofp i64 %nTimeSlices to double + %resizedTrotterStepSize = fdiv double %maxTime, %18 + %19 = sub i64 %nTimeSlices, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxTimeSlice = phi i64 [ 0, %entry ], [ %27, %exiting__1 ] + %20 = icmp sle i64 %idxTimeSlice, %19 + br i1 %20, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %21 = call %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %resizedTrotterStepSize) + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %21, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, %Array* }* + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %25, align 8 + store %Array* %qubits, %Array** %26, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %23, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %27 = add i64 %idxTimeSlice, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef2__TrotterSimulationAlgorithmImpl____ctladj(%Array* %__controlQubits__, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 0 + %trotterStepSize = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 1 + %trotterOrder = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 2 + %maxTime = load double, double* %3, align 8 + %4 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 3 + %evolutionGenerator = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %4, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { 
i64, %Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 1) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 4 + %qubits = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %17 = fdiv double %maxTime, %trotterStepSize + %__qsVar0__nTimeSlices__ = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %17) + %18 = sitofp i64 %__qsVar0__nTimeSlices__ to double + %__qsVar1__resizedTrotterStepSize__ = fdiv double %maxTime, %18 + %19 = sub i64 %__qsVar0__nTimeSlices__, 1 + %20 = sub i64 %19, 0 + %21 = sdiv i64 %20, 1 + %22 = mul i64 1, %21 + %23 = add i64 0, %22 + %24 = insertvalue %Range zeroinitializer, i64 %23, 0 + %25 = insertvalue %Range %24, i64 -1, 1 + %26 = insertvalue %Range %25, i64 0, 2 + %27 = extractvalue %Range %26, 0 + %28 = extractvalue %Range %26, 1 + %29 = extractvalue %Range %26, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %30 = icmp sgt i64 %28, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar2__idxTimeSlice__ = phi i64 [ %27, %preheader__1 ], [ %40, %exiting__1 ] + %31 = icmp sle i64 %__qsVar2__idxTimeSlice__, %29 + %32 = icmp sge i64 %__qsVar2__idxTimeSlice__, %29 + %33 = select i1 %30, i1 %31, i1 %32 + br i1 %33, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %34 = call %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %__qsVar1__resizedTrotterStepSize__) + %35 = call %Callable* @__quantum__rt__callable_copy(%Callable* %34, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %35, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %35) + call void @__quantum__rt__callable_make_controlled(%Callable* %35) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { %Array*, %Array* }* + %38 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %37, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %38, align 8 + store %Array* %qubits, %Array** %39, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %35, %Tuple* %36, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* 
%35, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %40 = add i64 %__qsVar2__idxTimeSlice__, %28 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef2__TrotterStepImpl____body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %idx, double %stepsize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %evolutionSet = load { %Callable* }*, { %Callable* }** %0, align 8 + %1 = getelementptr inbounds { %Callable* }, { %Callable* }* %evolutionSet, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + %3 = bitcast { %Callable* }* %evolutionSet to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %generatorSystem = load { i64, %Callable* }*, { i64, %Callable* }** %4, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %generatorSystemFunction = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + %6 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + 
%8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %8, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { i64 }* + %11 = getelementptr inbounds { i64 }, { i64 }* %10, i32 0, i32 0 + store i64 %idx, i64* %11, align 4 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %generatorSystemFunction, %Tuple* %9, %Tuple* %12) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %14, align 8 + %15 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %16 = load { %Array*, %Array* }*, { %Array*, %Array* }** %15, align 8 + %17 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %16, i32 0, i32 0 + %18 = load %Array*, %Array** %17, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %16, i32 0, i32 1 + %20 = load %Array*, %Array** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 1) + %21 = bitcast { %Array*, %Array* }* %16 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 1) + %22 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %24 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 1) + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }* }* getelementptr ({ { %Callable* }* }, { { %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %2, %Tuple* %24, %Tuple* %25) + %26 = bitcast %Tuple* %25 to { { %Callable* }* }* + %27 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %26, i32 0, i32 0 + %28 = load { %Callable* }*, { %Callable* }** %27, align 8 + %29 = getelementptr inbounds { %Callable* }, { %Callable* }* %28, i32 0, i32 0 + %30 = load %Callable*, %Callable** %29, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %31 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %32 = bitcast %Tuple* %31 to { double, %Array* }* + %33 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %32, i32 0, i32 0 + %34 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %32, i32 0, i32 1 + store double %stepsize, double* %33, align 8 + store %Array* %qubits, %Array** %34, 
align 8 + call void @__quantum__rt__callable_invoke(%Callable* %30, %Tuple* %31, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 -1) + %35 = bitcast { %Callable* }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef2__TrotterStepImpl____adj({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %idx, double %stepsize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %__qsVar0__evolutionSet__ = load { %Callable* }*, { %Callable* }** %0, align 8 + %1 = getelementptr inbounds { %Callable* }, { %Callable* }* %__qsVar0__evolutionSet__, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + %3 = bitcast { %Callable* }* %__qsVar0__evolutionSet__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %__qsVar1__generatorSystem__ = load { i64, %Callable* }*, { i64, %Callable* }** %4, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__qsVar1__generatorSystem__, i32 0, i32 1 + %__qsVar3__generatorSystemFunction__ = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + %6 = bitcast { i64, %Callable* }* %__qsVar1__generatorSystem__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__qsVar1__generatorSystem__, i32 0, i32 0 + %__qsVar2__nTerms__ = load i64, i64* %8, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { i64 }* + %11 = getelementptr inbounds { i64 }, { i64 }* %10, i32 0, i32 0 + store i64 %idx, i64* %11, align 4 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %__qsVar3__generatorSystemFunction__, %Tuple* %9, %Tuple* %12) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %__qsVar4__generatorIndex__ = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %14, align 8 + %15 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__, i32 0, i32 0 + %16 = load { %Array*, %Array* }*, { %Array*, %Array* }** %15, align 8 + %17 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %16, i32 0, i32 0 + %18 = load %Array*, %Array** %17, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %16, i32 0, i32 1 + %20 = load %Array*, %Array** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 1) + %21 = bitcast { %Array*, %Array* }* %16 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 1) + %22 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__, i32 0, i32 1 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %24 = bitcast { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 1) + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }* }* getelementptr ({ { %Callable* }* }, { { %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %2, %Tuple* %24, %Tuple* %25) + %26 = bitcast %Tuple* %25 to { { %Callable* }* }* + %27 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %26, i32 0, i32 0 + %28 = load { %Callable* }*, { %Callable* }** %27, align 8 + %29 = getelementptr inbounds { %Callable* }, { %Callable* }* %28, i32 0, i32 0 + %30 = load %Callable*, %Callable** %29, align 8 + %31 = call %Callable* @__quantum__rt__callable_copy(%Callable* %30, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %31) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { double, %Array* }* + %34 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %33, i32 0, i32 1 + store double %stepsize, double* %34, align 8 + store %Array* %qubits, %Array** %35, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %31, %Tuple* %32, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 -1) + %36 = bitcast { %Callable* }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef2__TrotterStepImpl____ctl(%Array* %__controlQubits__, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 0 + %evolutionGenerator = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %2 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %evolutionSet = load { %Callable* }*, { %Callable* }** %2, align 8 + %3 = getelementptr inbounds { %Callable* }, { %Callable* }* %evolutionSet, i32 0, i32 0 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + %5 = bitcast { %Callable* }* %evolutionSet to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %generatorSystem = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %generatorSystemFunction = load %Callable*, %Callable** %7, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + %8 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 1 + %idx = load i64, i64* %10, align 4 + %11 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 2 + %stepsize = load double, double* %11, align 8 + %12 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 3 + %qubits = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %13, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64 }* + %16 = getelementptr inbounds { i64 }, { i64 }* %15, i32 0, i32 0 + store i64 %idx, i64* %16, align 4 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %generatorSystemFunction, %Tuple* %14, %Tuple* %17) + %18 = bitcast %Tuple* %17 to { { { %Array*, %Array* }*, %Array* }* }* + %19 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %18, i32 0, i32 0 + %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %19, align 8 + %20 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %21 = load { %Array*, %Array* }*, { %Array*, %Array* }** %20, align 8 + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 0 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %24 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 1 + %25 = load %Array*, %Array** 
%24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 1) + %26 = bitcast { %Array*, %Array* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 1) + %27 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }* }* getelementptr ({ { %Callable* }* }, { { %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %29, %Tuple* %30) + %31 = bitcast %Tuple* %30 to { { %Callable* }* }* + %32 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %31, i32 0, i32 0 + %33 = load { %Callable* }*, { %Callable* }** %32, align 8 + %34 = getelementptr inbounds { %Callable* }, { %Callable* }* %33, i32 0, i32 0 + %35 = load %Callable*, %Callable** %34, align 8 + %36 = call %Callable* @__quantum__rt__callable_copy(%Callable* %35, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %36) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %37 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %38 = bitcast %Tuple* %37 to { double, %Array* }* + %39 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %38, i32 0, i32 0 + %40 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %38, i32 0, i32 1 + store double %stepsize, double* %39, align 8 + store %Array* %qubits, %Array** %40, align 8 + %41 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %42 = bitcast %Tuple* %41 to { %Array*, { double, %Array* }* }* + %43 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %42, i32 0, i32 0 + %44 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %42, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %43, align 8 + store { double, %Array* }* %38, { double, %Array* }** %44, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %36, %Tuple* %41, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, 
i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %35, i32 -1) + %45 = bitcast { %Callable* }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef2__TrotterStepImpl____ctladj(%Array* %__controlQubits__, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 0 + %evolutionGenerator = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %2 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %__qsVar0__evolutionSet__ = load { %Callable* }*, { %Callable* }** %2, align 8 + %3 = getelementptr inbounds { %Callable* }, { %Callable* }* %__qsVar0__evolutionSet__, i32 0, i32 0 + %4 = load %Callable*, %Callable** %3, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + %5 = bitcast { %Callable* }* %__qsVar0__evolutionSet__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %__qsVar1__generatorSystem__ = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__qsVar1__generatorSystem__, i32 0, i32 1 + %__qsVar3__generatorSystemFunction__ = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + %8 = bitcast { i64, %Callable* }* %__qsVar1__generatorSystem__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 1 + %idx = load i64, i64* %10, align 4 + %11 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 2 + %stepsize = load double, double* %11, align 8 + %12 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 3 + %qubits = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__qsVar1__generatorSystem__, i32 0, i32 0 + %__qsVar2__nTerms__ = load i64, i64* %13, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64 }* + %16 = getelementptr inbounds { i64 }, { i64 }* %15, i32 0, i32 0 + store i64 %idx, i64* %16, align 4 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %__qsVar3__generatorSystemFunction__, %Tuple* %14, %Tuple* 
%17) + %18 = bitcast %Tuple* %17 to { { { %Array*, %Array* }*, %Array* }* }* + %19 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %18, i32 0, i32 0 + %__qsVar4__generatorIndex__ = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %19, align 8 + %20 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__, i32 0, i32 0 + %21 = load { %Array*, %Array* }*, { %Array*, %Array* }** %20, align 8 + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 0 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %24 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 1 + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 1) + %26 = bitcast { %Array*, %Array* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 1) + %27 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }* }* getelementptr ({ { %Callable* }* }, { { %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %29, %Tuple* %30) + %31 = bitcast %Tuple* %30 to { { %Callable* }* }* + %32 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %31, i32 0, i32 0 + %33 = load { %Callable* }*, { %Callable* }** %32, align 8 + %34 = getelementptr inbounds { %Callable* }, { %Callable* }* %33, i32 0, i32 0 + %35 = load %Callable*, %Callable** %34, align 8 + %36 = call %Callable* @__quantum__rt__callable_copy(%Callable* %35, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %36) + call void @__quantum__rt__callable_make_controlled(%Callable* %36) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %37 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %38 = bitcast %Tuple* %37 to { double, %Array* }* + %39 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %38, i32 0, i32 0 + %40 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %38, i32 0, i32 1 + store double %stepsize, double* %39, align 8 + store %Array* %qubits, %Array** %40, align 8 + %41 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %42 = bitcast %Tuple* %41 to { %Array*, { double, %Array* }* }* + %43 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %42, i32 0, i32 0 + %44 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* 
}* %42, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %43, align 8 + store { double, %Array* }* %38, { double, %Array* }** %44, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %36, %Tuple* %41, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %35, i32 -1) + %45 = bitcast { %Callable* }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 -1) + ret void +} 
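+; ---------------------------------------------------------------------------
+; Editorial annotation (not emitted by the Q# compiler): the four
+; TrotterStepImpl specializations above (__body, __adj, __ctl, __ctladj)
+; share one shape. Each unpacks the evolution generator tuple
+; { { %Callable* }*, { i64, %Callable* }* } into an evolution set and a
+; generator system, invokes the generator-system callable with the term
+; index %idx to obtain a generator index, maps that through the
+; evolution-set callable to an evolution unitary, and invokes the unitary
+; on (stepsize, qubits). The adjoint/controlled variants differ only in
+; cloning the callable with __quantum__rt__callable_copy and applying
+; __quantum__rt__callable_make_adjoint and/or
+; __quantum__rt__callable_make_controlled before invocation; every borrowed
+; value is balanced by matching alias- and reference-count updates.
+;
+; For reference, a minimal sketch of the %Range loop pattern used by the
+; Trotter driver above, where the header selects between the <= and >= exit
+; tests based on the sign of the step. The function name below is
+; hypothetical and added purely for illustration; it assumes the
+; module-level %Range = type { i64, i64, i64 } declared earlier in this
+; file and is not part of the original fixture.
+define internal i64 @__editorial_example__sum_range(%Range %r) {
+entry:
+  %start = extractvalue %Range %r, 0
+  %step = extractvalue %Range %r, 1
+  %end = extractvalue %Range %r, 2
+  br label %preheader
+
+preheader:                 ; the loop counts upward iff the step is positive
+  %dir = icmp sgt i64 %step, 0
+  br label %header
+
+header:                    ; same select-based bound test as the loop above
+  %i = phi i64 [ %start, %preheader ], [ %next, %exiting ]
+  %acc = phi i64 [ 0, %preheader ], [ %acc.next, %exiting ]
+  %le = icmp sle i64 %i, %end
+  %ge = icmp sge i64 %i, %end
+  %cond = select i1 %dir, i1 %le, i1 %ge
+  br i1 %cond, label %body, label %exit
+
+body:                      ; accumulate the current index
+  %acc.next = add i64 %acc, %i
+  br label %exiting
+
+exiting:                   ; advance by the range step
+  %next = add i64 %i, %step
+  br label %header
+
+exit:
+  ret i64 %acc
+}
+; ---------------------------------------------------------------------------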
+ +define internal { i64, %Callable* }* @Microsoft__Quantum__Simulation__AddGeneratorSystems__body({ i64, %Callable* }* %generatorSystemA, { i64, %Callable* }* %generatorSystemB) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystemA, i32 0, i32 1 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { i64, %Callable* }* %generatorSystemA to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystemB, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + %5 = bitcast { i64, %Callable* }* %generatorSystemB to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %nTermsA = call i64 @Microsoft__Quantum__Simulation__GetGeneratorSystemNTerms__body({ i64, %Callable* }* %generatorSystemA) + %nTermsB = call i64 @Microsoft__Quantum__Simulation__GetGeneratorSystemNTerms__body({ i64, %Callable* }* %generatorSystemB) + %generatorIndexFunctionA = call %Callable* @Microsoft__Quantum__Simulation__GetGeneratorSystemFunction__body({ i64, %Callable* }* %generatorSystemA) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 1) + %generatorIndexFunctionB = call %Callable* @Microsoft__Quantum__Simulation__GetGeneratorSystemFunction__body({ i64, %Callable* }* %generatorSystemB) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 1) + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation____QsRef2___AddGeneratorSystems____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunctionB, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunctionB, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, i64, %Callable*, %Callable* }* getelementptr ({ %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Callable*, i64, i64, %Callable*, %Callable* }* + %9 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %8, i32 0, i32 2 + %12 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, 
%Callable*, %Callable* }* %8, i32 0, i32 3 + %13 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %8, i32 0, i32 4 + store %Callable* %6, %Callable** %9, align 8 + store i64 %nTermsA, i64* %10, align 4 + store i64 %nTermsB, i64* %11, align 4 + store %Callable* %generatorIndexFunctionA, %Callable** %12, align 8 + store %Callable* %generatorIndexFunctionB, %Callable** %13, align 8 + %generatorIndexFunction = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__23__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__12__FunctionTable, %Tuple* %7) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + %14 = add i64 %nTermsA, %nTermsB + %15 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 %14, %Callable* %generatorIndexFunction) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunction, i32 -1) + ret { i64, %Callable* }* %15 +} + +define internal i64 @Microsoft__Quantum__Simulation__GetGeneratorSystemNTerms__body({ i64, %Callable* }* %generatorSystem) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %generatorIndexFunction = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + %1 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %2, 
align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + ret i64 %nTerms +} + +define internal %Callable* @Microsoft__Quantum__Simulation__GetGeneratorSystemFunction__body({ i64, %Callable* }* %generatorSystem) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %generatorIndexFunction = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + %1 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %2, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + ret %Callable* %generatorIndexFunction +} + +define internal void @Lifted__PartialApplication__23__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64 }* + %1 = getelementptr inbounds { i64 }, { i64 }* %0, i32 0, i32 0 + %2 = load i64, i64* %1, align 4 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable*, %Callable* }* + %4 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 1 + %5 = load i64, i64* %4, align 4 + %6 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 2 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 3 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 4 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = 
call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64, i64, %Callable*, %Callable* }* getelementptr ({ i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, i64, i64, %Callable*, %Callable* }* + %14 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 4 + store i64 %2, i64* %14, align 4 + store i64 %5, i64* %15, align 4 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Callable* %11, %Callable** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef2___AddGeneratorSystems____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, i64, i64, %Callable*, %Callable* }* + %1 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 4 + %6 = load i64, i64* %1, align 4 + %7 = load i64, i64* %2, align 4 + %8 = load i64, i64* %3, align 4 + %9 = load %Callable*, %Callable** %4, align 8 + %10 = load %Callable*, %Callable** %5, align 8 + %11 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation____QsRef2___AddGeneratorSystems____body(i64 %6, i64 %7, i64 %8, %Callable* %9, %Callable* %10) + %12 = bitcast %Tuple* %result-tuple to { { { %Array*, %Array* }*, %Array* }* }* + %13 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %12, i32 0, i32 0 + store { { %Array*, %Array* }*, %Array* }* %11, { { %Array*, %Array* }*, %Array* }** %13, align 8 + ret void +} + +define internal void @MemoryManagement__12__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, 
%Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 3 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 4 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__12__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 3 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 4 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 %__Item1__, %Callable* %__Item2__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item2__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item2__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { i64, %Callable* }* + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %1, i32 0, i32 1 + store i64 %__Item1__, i64* %2, align 4 + store %Callable* %__Item2__, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item2__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item2__, i32 1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %__Item2__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item2__, i32 -1) + ret { i64, %Callable* }* %1 +} + +define internal { { %Callable* }*, { i64, %Callable* }* }* @Microsoft__Quantum__Simulation__EvolutionGenerator__body({ %Callable* }* %__Item1__, { i64, %Callable* }* %__Item2__) { +entry: + %0 = getelementptr inbounds { %Callable* }, { %Callable* }* %__Item1__, i32 0, i32 0 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { %Callable* }* %__Item1__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__Item2__, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + %5 = bitcast { i64, %Callable* }* %__Item2__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }*, { i64, %Callable* }* }* getelementptr ({ { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { %Callable* }*, { i64, %Callable* }* }* + %8 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %7, i32 0, i32 1 + store { %Callable* }* %__Item1__, { %Callable* }** %8, align 8 + store { i64, %Callable* }* %__Item2__, { i64, %Callable* }** %9, align 8 + %10 = getelementptr inbounds { %Callable* }, { %Callable* }* %__Item1__, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__Item2__, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 1) + %14 = bitcast { %Callable* }* %__Item1__ to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 1) + %15 = bitcast { i64, %Callable* }* %__Item2__ to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + ret { { %Callable* }*, { i64, %Callable* }* }* %7 +} + +define internal { %Callable* }* @Microsoft__Quantum__Simulation__EvolutionSet__body(%Callable* %__Item1__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable* }* + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + store %Callable* %__Item1__, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 -1) + ret { %Callable* }* %1 +} + +define internal { %Callable* }* @Microsoft__Quantum__Simulation__EvolutionUnitary__body(%Callable* %__Item1__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable* }* + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + store %Callable* %__Item1__, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 -1) + ret { %Callable* }* %1 +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %0, %Array* %__Item3__) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__Item3__, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, %Array* }*, %Array* }* getelementptr ({ { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { { %Array*, %Array* }*, %Array* }* + %3 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %2, i32 0, i32 1 + store { %Array*, %Array* }* %0, { %Array*, %Array* }** %3, align 8 + store %Array* %__Item3__, %Array** %4, align 8 + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 1) + %9 = bitcast { %Array*, %Array* }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__Item3__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__Item3__, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %2 +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__body(i64 %idxTerm) { 
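+; IdentityGeneratorIndex: constructs GeneratorIndex(([0], [0.0]), [0]), the identity
+; term of a generator system; the %idxTerm argument is ignored by the generated body.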
+entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to i64* + store i64 0, i64* %2, align 4 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to double* + store double 0.000000e+00, double* %5, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %0, %Array** %8, align 8 + store %Array* %3, %Array** %9, align 8 + %10 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 0) + %12 = bitcast i8* %11 to i64* + store i64 0, i64* %12, align 4 + %13 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %7, %Array* %10) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %13 +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Simulation__IdentityGeneratorSystem__body() { +entry: + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 0, %Callable* %0) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + ret { i64, %Callable* }* %1 +} + +define internal void @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64 }* + %1 = getelementptr inbounds { i64 }, { i64 }* %0, i32 0, i32 0 + %2 = load i64, i64* %1, align 4 + %3 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__body(i64 %2) + %4 = bitcast %Tuple* %result-tuple to { { { %Array*, %Array* }*, %Array* }* }* + %5 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %4, i32 0, i32 0 + store { { %Array*, %Array* }*, %Array* }* %3, { { %Array*, %Array* }*, %Array* }** %5, align 8 + ret void +} + +define internal { %Callable* }* @Microsoft__Quantum__Simulation__SimulationAlgorithm__body(%Callable* %__Item1__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable* }* + %2 = getelementptr inbounds { 
%Callable* }, { %Callable* }* %1, i32 0, i32 0 + store %Callable* %__Item1__, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 -1) + ret { %Callable* }* %1 +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Simulation__SumGeneratorSystems__body(%Array* %generatorSystems) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %generatorSystems) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %generatorSystems, i64 %2) + %5 = bitcast i8* %4 to { i64, %Callable* }** + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %5, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { i64, %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %generatorSystems, i32 1) + %11 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation__AddGeneratorSystems__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %12 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__IdentityGeneratorSystem__body() + %13 = call { i64, %Callable* }* @Microsoft__Quantum__Arrays___64d4e30656f641dea9a207bf3ea1393a_Fold__body(%Callable* %11, { i64, %Callable* }* %12, %Array* %generatorSystems) + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 1 + %15 = load %Callable*, %Callable** %14, align 8 + %16 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %17 = phi i64 [ 0, %exit__1 ], [ %25, %exiting__2 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %generatorSystems, i64 %17) + %20 = bitcast i8* %19 to { i64, %Callable* }** + %21 = load { i64, %Callable* }*, { i64, %Callable* }** %20, align 8 + %22 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %21, i32 0, i32 1 + %23 = load %Callable*, %Callable** %22, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %23, i32 -1) + %24 = bitcast { i64, %Callable* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %25 = add i64 %17, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %generatorSystems, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + %26 = bitcast { i64, %Callable* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + ret { i64, %Callable* }* %13 +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Arrays___64d4e30656f641dea9a207bf3ea1393a_Fold__body(%Callable* %folder, { i64, %Callable* }* %state, %Array* %array) { +entry: + %current = alloca { i64, %Callable* }*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %folder, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %folder, i32 1) + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %state, i32 0, i32 1 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { i64, %Callable* }* %state to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %5) + %8 = bitcast i8* %7 to { i64, %Callable* }** + %9 = load { i64, %Callable* }*, { i64, %Callable* }** %8, align 8 + %10 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %9, i32 0, i32 1 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %11, i32 1) + %12 = bitcast { i64, %Callable* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + store { i64, %Callable* }* %state, { i64, %Callable* }** %current, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %14 = call %Range @Microsoft__Quantum__Arrays___9130f13c46ab4727adf43d8d67c99cc2_IndexRange__body(%Array* %array) + %15 = extractvalue %Range %14, 0 + %16 = extractvalue %Range %14, 1 + %17 = extractvalue %Range %14, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %18 = icmp sgt i64 %16, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idxElement = phi i64 [ %15, %preheader__1 ], [ %43, %exiting__2 ] + %19 = icmp sle i64 %idxElement, %17 + %20 = icmp sge i64 %idxElement, %17 + %21 = select i1 %18, i1 %19, i1 %20 
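+ ; %21 picks the loop-continue test by step sign (%18): ascending ranges use sle (%19), descending use sge (%20).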
+ br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = load { i64, %Callable* }*, { i64, %Callable* }** %current, align 8 + %23 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %22, i32 0, i32 1 + %24 = load %Callable*, %Callable** %23, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 1) + %25 = bitcast { i64, %Callable* }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 1) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idxElement) + %27 = bitcast i8* %26 to { i64, %Callable* }** + %28 = load { i64, %Callable* }*, { i64, %Callable* }** %27, align 8 + %29 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %28, i32 0, i32 1 + %30 = load %Callable*, %Callable** %29, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 1) + %31 = bitcast { i64, %Callable* }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { i64, %Callable* }* }* getelementptr ({ { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, %Callable* }*, { i64, %Callable* }* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { { i64, %Callable* }*, { i64, %Callable* }* }* + %34 = getelementptr inbounds { { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, %Callable* }*, { i64, %Callable* }* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, %Callable* }*, { i64, %Callable* }* }* %33, i32 0, i32 1 + store { i64, %Callable* }* %22, { i64, %Callable* }** %34, align 8 + store { i64, %Callable* }* %28, { i64, %Callable* }** %35, align 8 + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }* }* getelementptr ({ { i64, %Callable* }* }, { { i64, %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %folder, %Tuple* %32, %Tuple* %36) + %37 = bitcast %Tuple* %36 to { { i64, %Callable* }* }* + %38 = getelementptr inbounds { { i64, %Callable* }* }, { { i64, %Callable* }* }* %37, i32 0, i32 0 + %39 = load { i64, %Callable* }*, { i64, %Callable* }** %38, align 8 + %40 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %39, i32 0, i32 1 + %41 = load %Callable*, %Callable** %40, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %41, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %41, i32 1) + %42 = bitcast { i64, %Callable* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %41, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %41, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %24, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + store { i64, %Callable* }* %39, { i64, %Callable* }** %current, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %41, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %41, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %43 = add i64 %idxElement, %16 + br label %header__2 + +exit__2: ; preds = %header__2 + %44 = load { i64, %Callable* }*, { i64, %Callable* }** %current, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %folder, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %folder, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + %45 = sub i64 %3, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %46 = phi i64 [ 0, %exit__2 ], [ %54, %exiting__3 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %46) + %49 = bitcast i8* %48 to { i64, %Callable* }** + %50 = load { i64, %Callable* }*, { i64, %Callable* }** %49, align 8 + %51 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %50, i32 0, i32 1 + %52 = load %Callable*, %Callable** %51, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %52, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %52, i32 -1) + %53 = bitcast { i64, %Callable* }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %53, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %54 = add i64 %46, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + %55 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %44, i32 0, i32 1 + %56 = load %Callable*, %Callable** %55, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %56, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %56, i32 -1) + %57 = bitcast { i64, %Callable* }* %44 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + ret { i64, %Callable* }* %44 +} + +define internal void @Microsoft__Quantum__Simulation__AddGeneratorSystems__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, 
%Callable* }*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, %Callable* }*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %3 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %5 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__AddGeneratorSystems__body({ i64, %Callable* }* %3, { i64, %Callable* }* %4) + %6 = bitcast %Tuple* %result-tuple to { { i64, %Callable* }* }* + %7 = getelementptr inbounds { { i64, %Callable* }* }, { { i64, %Callable* }* }* %6, i32 0, i32 0 + store { i64, %Callable* }* %5, { i64, %Callable* }** %7, align 8 + ret void +} + +define internal { %Callable* }* @Microsoft__Quantum__Simulation__TrotterSimulationAlgorithm__body(double %trotterStepSize, i64 %trotterOrder) { +entry: + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation____QsRef2__TrotterSimulationAlgorithmImpl____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, i64 }* getelementptr ({ %Callable*, double, i64 }, { %Callable*, double, i64 }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, double, i64 }* + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store double %trotterStepSize, double* %4, align 8 + store i64 %trotterOrder, i64* %5, align 4 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__24__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__13__FunctionTable, %Tuple* %1) + %7 = call { %Callable* }* @Microsoft__Quantum__Simulation__SimulationAlgorithm__body(%Callable* %6) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + ret { %Callable* }* %7 +} + +define internal void @Lifted__PartialApplication__24__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = bitcast %Tuple* %arg-tuple to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %6 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 0 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 1 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %8, align 8 + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { 
%Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 2 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %14 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store i64 %4, i64* %15, align 4 + store double %7, double* %16, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %9, { { %Callable* }*, { i64, %Callable* }* }** %17, align 8 + store %Array* %11, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__24__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = bitcast %Tuple* %arg-tuple to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %6 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 0 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 1 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %8, align 8 + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 2 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %14 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store i64 %4, i64* %15, align 4 + store double %7, double* %16, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %9, { { %Callable* }*, { i64, %Callable* }* }** %17, align 8 + store %Array* %11, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__24__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %6 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = 
getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 1 + %13 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %12, align 8 + %14 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 2 + %15 = load %Array*, %Array** %14, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %18 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 3 + %22 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 4 + store double %7, double* %18, align 8 + store i64 %9, i64* %19, align 4 + store double %11, double* %20, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %13, { { %Callable* }*, { i64, %Callable* }* }** %21, align 8 + store %Array* %15, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* getelementptr ({ %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* 
%24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__24__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %6 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 1 + %13 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %12, align 8 + %14 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 2 + %15 = load %Array*, %Array** %14, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %18 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, 
%Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 3 + %22 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 4 + store double %7, double* %18, align 8 + store i64 %9, i64* %19, align 4 + store double %11, double* %20, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %13, { { %Callable* }*, { i64, %Callable* }* }** %21, align 8 + store %Array* %15, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* getelementptr ({ %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %29) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef2__TrotterSimulationAlgorithmImpl____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64, double, { { 
%Callable* }*, { i64, %Callable* }* }*, %Array* }* + %1 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load i64, i64* %2, align 4 + %8 = load double, double* %3, align 8 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %4, align 8 + %10 = load %Array*, %Array** %5, align 8 + call void @Microsoft__Quantum__Simulation____QsRef2__TrotterSimulationAlgorithmImpl____body(double %6, i64 %7, double %8, { { %Callable* }*, { i64, %Callable* }* }* %9, %Array* %10) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef2__TrotterSimulationAlgorithmImpl____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %1 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load i64, i64* %2, align 4 + %8 = load double, double* %3, align 8 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %4, align 8 + %10 = load %Array*, %Array** %5, align 8 + call void @Microsoft__Quantum__Simulation____QsRef2__TrotterSimulationAlgorithmImpl____adj(double %6, i64 %7, double %8, { { %Callable* }*, { i64, %Callable* }* }* %9, %Array* %10) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef2__TrotterSimulationAlgorithmImpl____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { 
%Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef2__TrotterSimulationAlgorithmImpl____ctl(%Array* %3, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef2__TrotterSimulationAlgorithmImpl____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef2__TrotterSimulationAlgorithmImpl____ctladj(%Array* %3, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__13__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__13__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__25__body__wrapper(%Tuple* %capture-tuple, %Tuple* 
%arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %2 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { i64, double, %Array* }* + %4 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 0 + %5 = load i64, i64* %4, align 4 + %6 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* getelementptr ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %12 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 3 + store { { %Callable* }*, { i64, %Callable* }* }* %2, { { %Callable* }*, { i64, %Callable* }* }** %12, align 8 + store i64 %5, i64* %13, align 4 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__25__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %2 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { i64, double, %Array* }* + %4 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 0 + %5 = load i64, i64* %4, align 4 + %6 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 1 + %7 = load double, double* 
%6, align 8 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* getelementptr ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %12 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 3 + store { { %Callable* }*, { i64, %Callable* }* }* %2, { { %Callable* }*, { i64, %Callable* }* }** %12, align 8 + store i64 %5, i64* %13, align 4 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__25__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, double, %Array* }*, { i64, double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1 + %7 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %6, align 8 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 0 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, 
i32 1 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 2 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* getelementptr ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %16 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 3 + store { { %Callable* }*, { i64, %Callable* }* }* %7, { { %Callable* }*, { i64, %Callable* }* }** %16, align 8 + store i64 %9, i64* %17, align 4 + store double %11, double* %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* getelementptr ({ %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__25__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, double, %Array* }*, { i64, double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1 + %7 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %6, align 8 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 0 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 1 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 2 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* getelementptr ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %16 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 3 + store { { %Callable* }*, { i64, %Callable* }* }* %7, { { %Callable* }*, { i64, %Callable* }* }** %16, align 8 + store i64 %9, i64* %17, align 4 + store double %11, double* %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* getelementptr ({ %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* 
%21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef2__TrotterStepImpl____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %1 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 3 + %5 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %6 = load i64, i64* %2, align 4 + %7 = load double, double* %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Simulation____QsRef2__TrotterStepImpl____body({ { %Callable* }*, { i64, %Callable* }* }* %5, i64 %6, double %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef2__TrotterStepImpl____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %1 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* 
}*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 3 + %5 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %6 = load i64, i64* %2, align 4 + %7 = load double, double* %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Simulation____QsRef2__TrotterStepImpl____adj({ { %Callable* }*, { i64, %Callable* }* }* %5, i64 %6, double %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef2__TrotterStepImpl____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef2__TrotterStepImpl____ctl(%Array* %3, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef2__TrotterStepImpl____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef2__TrotterStepImpl____ctladj(%Array* %3, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__14__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = 
getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 %count-change) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 %count-change) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__14__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 %count-change) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { i64, 
%Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 %count-change) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__26__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array* }* + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__26__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array* }* + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__26__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Array* }* %9, { double, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__26__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 
ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Array* }* %9, { double, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @MemoryManagement__15__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__15__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Arrays___c1a84e3a65724cc49ff23eb15bf0b7bd_LookupFunction__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label 
%exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Arrays___baa85e836eb8473188447f7dd40ddae1_ElementAt__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %9 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %15, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %10) + %13 = bitcast i8* %12 to %Callable** + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %15 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %array, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Callable*, %Array* }* + %18 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 1 + store %Callable* %8, %Callable** %18, align 8 + store %Array* %array, %Array** %19, align 8 + %20 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__27__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__16__FunctionTable, %Tuple* %16) + %21 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %22 = phi i64 [ 0, %exit__2 ], [ %27, %exiting__3 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %22) + %25 = bitcast i8* %24 to %Callable** + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %27 = add i64 %22, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Callable* %20 +} + +define internal void @Lifted__PartialApplication__27__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64 }* + %1 = getelementptr inbounds { i64 }, { i64 }* %0, i32 0, i32 0 + %2 = load i64, i64* %1, align 4 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %4 = getelementptr inbounds { 
%Callable*, %Array* }, { %Callable*, %Array* }* %3, i32 0, i32 1 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { i64, %Array* }* + %8 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %7, i32 0, i32 1 + store i64 %2, i64* %8, align 4 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Arrays___baa85e836eb8473188447f7dd40ddae1_ElementAt__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Array* }* + %1 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %0, i32 0, i32 1 + %3 = load i64, i64* %1, align 4 + %4 = load %Array*, %Array** %2, align 8 + %5 = call %Callable* @Microsoft__Quantum__Arrays___baa85e836eb8473188447f7dd40ddae1_ElementAt__body(i64 %3, %Array* %4) + %6 = bitcast %Tuple* %result-tuple to { %Callable* }* + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + store %Callable* %5, %Callable** %7, align 8 + ret void +} + +define internal void @MemoryManagement__16__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Callable** + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__16__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* 
}, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Callable** + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %11, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Arrays___baa85e836eb8473188447f7dd40ddae1_ElementAt__body(i64 %index, %Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %8 = icmp sge i64 %index, 0 + br i1 %8, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %exit__1 + %9 = icmp slt i64 %index, %0 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %exit__1 + %10 = phi i1 [ %9, %condTrue__1 ], [ %8, %exit__1 ] + %11 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @17, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %10, %String* %11) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %index) + %13 = bitcast i8* %12 to %Callable** + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 1) + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %condContinue__1 + %16 = phi i64 [ 0, %condContinue__1 ], [ %21, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %16) + %19 
= bitcast i8* %18 to %Callable** + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %20, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %20, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) + ret %Callable* %14 +} + +define internal %Array* @Microsoft__Quantum__Arrays___00d673fd4b4c4c47a9ef358c9e077e10_Subarray__body(%Array* %indices, %Array* %array) { +entry: + %sliced = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %nSliced = call i64 @__quantum__rt__array_get_size_1d(%Array* %indices) + %0 = icmp eq i64 %nSliced, 0 + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %1 + +continue__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %3 = bitcast i8* %2 to i64* + %4 = load i64, i64* %3, align 4 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %4) + %6 = bitcast i8* %5 to %Qubit** + %7 = load %Qubit*, %Qubit** %6, align 8 + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nSliced) + %9 = sub i64 %nSliced, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %10 = phi i64 [ 0, %continue__1 ], [ %14, %exiting__1 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 %10) + %13 = bitcast i8* %12 to %Qubit** + store %Qubit* %7, %Qubit** %13, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %14 = add i64 %10, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %8, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %15 = sub i64 %nSliced, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idx = phi i64 [ 1, %exit__1 ], [ %27, %exiting__2 ] + %16 = icmp sle i64 %idx, %15 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = load %Array*, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %18 = call %Array* @__quantum__rt__array_copy(%Array* %17, i1 false) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 %idx) + %20 = bitcast i8* %19 to i64* + %21 = load i64, i64* %20, align 4 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %21) + %23 = bitcast i8* %22 to %Qubit** + %24 = load %Qubit*, %Qubit** %23, align 8 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %idx) + %26 = bitcast i8* %25 to %Qubit** + store %Qubit* %24, %Qubit** %26, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + store %Array* %18, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + br label %exiting__2 + 
+exiting__2: ; preds = %body__2 + %27 = add i64 %idx, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %28 = load %Array*, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 -1) + ret %Array* %28 +} + +define internal %Array* @Microsoft__Quantum__Arrays___0c8093b847cf4d60bc10980fe3db4834_Padded__body(i64 %nElementsTotal, { double, double }* %defaultElement, %Array* %inputArray) { +entry: + %0 = bitcast { double, double }* %defaultElement to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %nElementsInitial = call i64 @__quantum__rt__array_get_size_1d(%Array* %inputArray) + %1 = sub i64 %nElementsInitial, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %inputArray, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 1) + %nAbsElementsTotal = call i64 @Microsoft__Quantum__Math__AbsI__body(i64 %nElementsTotal) + %9 = icmp sge i64 %nAbsElementsTotal, %nElementsInitial + %10 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([71 x i8], [71 x i8]* @18, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %9, i1 true, %String* %10) + %nElementsPad = sub i64 %nAbsElementsTotal, %nElementsInitial + %padArray = call %Array* @Microsoft__Quantum__Arrays___dad9e1dc65fa4578b28eca0ca4d0b968_ConstantArray__body(i64 %nElementsPad, { double, double }* %defaultElement) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %padArray) + %12 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %padArray, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 1) + %20 = icmp sge i64 %nElementsTotal, 0 + br i1 %20, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__2 + %21 = call %Array* @__quantum__rt__array_concatenate(%Array* %padArray, %Array* %inputArray) + %22 = call i64 @__quantum__rt__array_get_size_1d(%Array* %21) + %23 = sub i64 %22, 1 + br label %header__3 + +condFalse__1: ; preds = %exit__2 + %24 = call %Array* @__quantum__rt__array_concatenate(%Array* %inputArray, %Array* %padArray) + %25 = call i64 
@__quantum__rt__array_get_size_1d(%Array* %24) + %26 = sub i64 %25, 1 + br label %header__4 + +condContinue__1: ; preds = %exit__4, %exit__3 + %27 = phi %Array* [ %21, %exit__3 ], [ %24, %exit__4 ] + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + %28 = sub i64 %nElementsInitial, 1 + br label %header__5 + +header__3: ; preds = %exiting__3, %condTrue__1 + %29 = phi i64 [ 0, %condTrue__1 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %23 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) + br label %condContinue__1 + +header__4: ; preds = %exiting__4, %condFalse__1 + %36 = phi i64 [ 0, %condFalse__1 ], [ %42, %exiting__4 ] + %37 = icmp sle i64 %36, %26 + br i1 %37, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %24, i64 %36) + %39 = bitcast i8* %38 to { double, double }** + %40 = load { double, double }*, { double, double }** %39, align 8 + %41 = bitcast { double, double }* %40 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %42 = add i64 %36, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 -1) + br label %condContinue__1 + +header__5: ; preds = %exiting__5, %condContinue__1 + %43 = phi i64 [ 0, %condContinue__1 ], [ %49, %exiting__5 ] + %44 = icmp sle i64 %43, %28 + br i1 %44, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %inputArray, i64 %43) + %46 = bitcast i8* %45 to { double, double }** + %47 = load { double, double }*, { double, double }** %46, align 8 + %48 = bitcast { double, double }* %47 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %48, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %49 = add i64 %43, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 -1) + %50 = sub i64 %11, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %51 = phi i64 [ 0, %exit__5 ], [ %57, %exiting__6 ] + %52 = icmp sle i64 %51, %50 + br i1 %52, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %padArray, i64 %51) + %54 = bitcast i8* %53 to { double, double }** + %55 = load { double, double }*, { double, double }** %54, align 8 + %56 = bitcast { double, double }* %55 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %57 = add i64 %51, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void 
@__quantum__rt__array_update_alias_count(%Array* %padArray, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + %58 = sub i64 %11, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %59 = phi i64 [ 0, %exit__6 ], [ %65, %exiting__7 ] + %60 = icmp sle i64 %59, %58 + br i1 %60, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %padArray, i64 %59) + %62 = bitcast i8* %61 to { double, double }** + %63 = load { double, double }*, { double, double }** %62, align 8 + %64 = bitcast { double, double }* %63 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %64, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %65 = add i64 %59, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %padArray, i32 -1) + ret %Array* %27 +} + +define internal i64 @Microsoft__Quantum__Math__AbsI__body(i64 %a) { +entry: + %0 = icmp slt i64 %a, 0 + br i1 %0, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %1 = sub i64 0, %a + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %2 = phi i64 [ %1, %condTrue__1 ], [ %a, %condFalse__1 ] + ret i64 %2 +} + +define internal %Array* @Microsoft__Quantum__Arrays___dad9e1dc65fa4578b28eca0ca4d0b968_ConstantArray__body(i64 %length, { double, double }* %value) { +entry: + %0 = bitcast { double, double }* %value to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %2 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %3) + %6 = bitcast i8* %5 to { double, double }** + store { double, double }* %value, { double, double }** %6, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret %Array* %1 +} + +define internal %Array* @Microsoft__Quantum__Arrays___a50b761db41347a2a0cd7b53be7db59d_ConstantArray__body(i64 %length, double %value) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %1 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2) + %5 = bitcast i8* %4 to double* + store double %value, double* %5, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + ret %Array* %0 +} + +define internal %Array* @Microsoft__Quantum__Arrays___96177b3bdf29439bb7ea0c139e372afd_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to %Array** + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %7 = icmp eq i64 %length, 0 + br i1 %7, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %9 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %11 = bitcast i8* %10 to %Array** + %12 = load %Array*, %Array** %11, align 8 + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Array* }* + %15 = getelementptr inbounds { %Array* }, { %Array* }* %14, i32 0, i32 0 + store %Array* %12, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %13, %Tuple* %16) + %17 = bitcast %Tuple* %16 to { %Callable* }* + %18 = getelementptr inbounds { %Callable* }, { %Callable* }* %17, i32 0, i32 0 + %first = load %Callable*, %Callable** %18, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %first, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %first, i32 1) + %19 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %20 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %21 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__2 ] + %22 = icmp sle i64 %21, %9 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %21) + %24 = bitcast i8* %23 to %Array** + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %8 + +header__3: ; preds = %exiting__3, %continue__1 + %27 = phi i64 [ 0, %continue__1 ], [ %31, %exiting__3 ] + %28 = icmp sle i64 %27, %20 + br i1 %28, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 %27) + %30 = bitcast i8* %29 to %Callable** + store %Callable* %first, %Callable** %30, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* 
%first, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %first, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %31 = add i64 %27, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %19, %Array** %retval, align 8 + %32 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %33 = phi i64 [ 0, %exit__3 ], [ %38, %exiting__4 ] + %34 = icmp sle i64 %33, %32 + br i1 %34, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 %33) + %36 = bitcast i8* %35 to %Callable** + %37 = load %Callable*, %Callable** %36, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %37, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %37, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %38 = add i64 %33, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 1) + %39 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idx = phi i64 [ 1, %exit__4 ], [ %56, %exiting__5 ] + %40 = icmp sle i64 %idx, %39 + br i1 %40, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %41 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 -1) + %42 = call %Array* @__quantum__rt__array_copy(%Array* %41, i1 false) + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %44 = bitcast i8* %43 to %Array** + %45 = load %Array*, %Array** %44, align 8 + %46 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %47 = bitcast %Tuple* %46 to { %Array* }* + %48 = getelementptr inbounds { %Array* }, { %Array* }* %47, i32 0, i32 0 + store %Array* %45, %Array** %48, align 8 + %49 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %46, %Tuple* %49) + %50 = bitcast %Tuple* %49 to { %Callable* }* + %51 = getelementptr inbounds { %Callable* }, { %Callable* }* %50, i32 0, i32 0 + %52 = load %Callable*, %Callable** %51, align 8 + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %42, i64 %idx) + %54 = bitcast i8* %53 to %Callable** + call void @__quantum__rt__capture_update_alias_count(%Callable* %52, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %52, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %52, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %52, i32 1) + %55 = load %Callable*, %Callable** %54, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %55, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %55, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %55, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %55, i32 -1) + store %Callable* %52, %Callable** %54, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 1) + store %Array* %42, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %46, i32 -1) + 
call void @__quantum__rt__capture_update_reference_count(%Callable* %52, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %52, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %49, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %56 = add i64 %idx, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %57 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %58 = sub i64 %length, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %59 = phi i64 [ 0, %exit__5 ], [ %64, %exiting__6 ] + %60 = icmp sle i64 %59, %58 + br i1 %60, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %59) + %62 = bitcast i8* %61 to %Array** + %63 = load %Array*, %Array** %62, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %63, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %64 = add i64 %59, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %first, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %first, i32 -1) + %65 = call i64 @__quantum__rt__array_get_size_1d(%Array* %57) + %66 = sub i64 %65, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %67 = phi i64 [ 0, %exit__6 ], [ %72, %exiting__7 ] + %68 = icmp sle i64 %67, %66 + br i1 %68, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %57, i64 %67) + %70 = bitcast i8* %69 to %Callable** + %71 = load %Callable*, %Callable** %70, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %71, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %71, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %72 = add i64 %67, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %57, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %first, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %first, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret %Array* %57 +} + +define internal %Array* @Microsoft__Quantum__Arrays___31a44c6111824f8e8c14091708980cfb_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = icmp eq i64 %length, 0 + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %1 + +continue__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %3 = bitcast i8* %2 to double* + %4 = load double, double* %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { double }* + %7 = getelementptr inbounds { double }, { double }* %6, i32 0, i32 0 + store double %4, double* %7, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }* }* getelementptr ({ { double, double }* }, { { double, double }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %5, %Tuple* %8) + %9 = bitcast %Tuple* %8 to { { double, double }* }* + %10 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %9, i32 0, i32 0 + %first = load { double, double }*, { double, double }** %10, align 8 + %11 = bitcast { double, double }* %first to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %13 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %14 = phi i64 [ 0, %continue__1 ], [ %18, %exiting__1 ] + %15 = icmp sle i64 %14, %13 + br i1 %15, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %14) + %17 = bitcast i8* %16 to { double, double }** + store { double, double }* %first, { double, double }** %17, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %14, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %12, %Array** %retval, align 8 + %19 = sub i64 %length, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %26, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %20) + %23 = bitcast i8* %22 to { double, double }** + %24 = load { double, double }*, { double, double }** %23, align 8 + %25 = bitcast { double, double }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %27 = sub i64 %length, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idx = phi i64 [ 1, %exit__2 ], [ %46, %exiting__3 ] + %28 = icmp sle i64 %idx, %27 + br i1 %28, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %29 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 -1) + %30 = call %Array* @__quantum__rt__array_copy(%Array* %29, i1 false) + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %32 = bitcast i8* %31 to double* + %33 = load double, double* %32, align 8 + %34 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %35 = bitcast %Tuple* %34 to { double }* + %36 = 
getelementptr inbounds { double }, { double }* %35, i32 0, i32 0 + store double %33, double* %36, align 8 + %37 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }* }* getelementptr ({ { double, double }* }, { { double, double }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %34, %Tuple* %37) + %38 = bitcast %Tuple* %37 to { { double, double }* }* + %39 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %38, i32 0, i32 0 + %40 = load { double, double }*, { double, double }** %39, align 8 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 %idx) + %42 = bitcast i8* %41 to { double, double }** + %43 = bitcast { double, double }* %40 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 1) + %44 = load { double, double }*, { double, double }** %42, align 8 + %45 = bitcast { double, double }* %44 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + store { double, double }* %40, { double, double }** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %30, i32 1) + store %Array* %30, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %46 = add i64 %idx, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %47 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + %48 = call i64 @__quantum__rt__array_get_size_1d(%Array* %47) + %49 = sub i64 %48, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %50 = phi i64 [ 0, %exit__3 ], [ %56, %exiting__4 ] + %51 = icmp sle i64 %50, %49 + br i1 %51, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %50) + %53 = bitcast i8* %52 to { double, double }** + %54 = load { double, double }*, { double, double }** %53, align 8 + %55 = bitcast { double, double }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %55, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %56 = add i64 %50, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret %Array* %47 +} + +define internal %Array* @Microsoft__Quantum__Arrays___9d3e86ab94fe4a3a88b26f8cc32a1792_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + 
%0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %9 = icmp slt i64 %8, %0 + br i1 %9, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %8, %condTrue__1 ], [ %0, %condFalse__1 ] + %10 = icmp eq i64 %nElements, 0 + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + %12 = sub i64 %0, 1 + br label %header__2 + +continue__1: ; preds = %condContinue__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %14 = bitcast i8* %13 to double* + %15 = load double, double* %14, align 8 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %17 = bitcast i8* %16 to %Array** + %18 = load %Array*, %Array** %17, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 1) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { double, %Array* }* + %21 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %20, i32 0, i32 1 + store double %15, double* %21, align 8 + store %Array* %18, %Array** %22, align 8 + %23 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %24 = sub i64 %nElements, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %25 = phi i64 [ 0, %then0__1 ], [ %30, %exiting__2 ] + %26 = icmp sle i64 %25, %12 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %25) + %28 = bitcast i8* %27 to %Array** + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %11 + +header__3: ; preds = %exiting__3, %continue__1 + %31 = phi i64 [ 0, %continue__1 ], [ %36, %exiting__3 ] + %32 = icmp sle i64 %31, %24 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %31) + %34 = bitcast i8* %33 to { double, %Array* }** + store { double, %Array* }* %20, { 
double, %Array* }** %34, align 8 + %35 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %35, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %36 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %23, %Array** %output, align 8 + %37 = sub i64 %nElements, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %38 = phi i64 [ 0, %exit__3 ], [ %46, %exiting__4 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %38) + %41 = bitcast i8* %40 to { double, %Array* }** + %42 = load { double, %Array* }*, { double, %Array* }** %41, align 8 + %43 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %42, i32 0, i32 1 + %44 = load %Array*, %Array** %43, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 1) + %45 = bitcast { double, %Array* }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %46 = add i64 %38, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %47 = sub i64 %nElements, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idxElement = phi i64 [ 1, %exit__4 ], [ %67, %exiting__5 ] + %48 = icmp sle i64 %idxElement, %47 + br i1 %48, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + %50 = call %Array* @__quantum__rt__array_copy(%Array* %49, i1 false) + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %52 = bitcast i8* %51 to double* + %53 = load double, double* %52, align 8 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %55 = bitcast i8* %54 to %Array** + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %56, i32 1) + %57 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %58 = bitcast %Tuple* %57 to { double, %Array* }* + %59 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %58, i32 0, i32 0 + %60 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %58, i32 0, i32 1 + store double %53, double* %59, align 8 + store %Array* %56, %Array** %60, align 8 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 %idxElement) + %62 = bitcast i8* %61 to { double, %Array* }** + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 1) + %63 = load { double, %Array* }*, { double, %Array* }** %62, align 8 + %64 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %63, i32 0, i32 1 + %65 = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 -1) + %66 = bitcast { double, %Array* }* %63 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %66, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %66, i32 -1) + store { double, %Array* }* %58, { double, %Array* }** %62, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 1) + store %Array* %50, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %67 = add i64 %idxElement, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %68 = load %Array*, %Array** %output, align 8 + %69 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + %70 = sub i64 %0, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %71 = phi i64 [ 0, %exit__5 ], [ %76, %exiting__6 ] + %72 = icmp sle i64 %71, %70 + br i1 %72, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %73 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %71) + %74 = bitcast i8* %73 to %Array** + %75 = load %Array*, %Array** %74, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %75, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %76 = add i64 %71, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %77 = call i64 @__quantum__rt__array_get_size_1d(%Array* %68) + %78 = sub i64 %77, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %79 = phi i64 [ 0, %exit__6 ], [ %87, %exiting__7 ] + %80 = icmp sle i64 %79, %78 + br i1 %80, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %68, i64 %79) + %82 = bitcast i8* %81 to { double, %Array* }** + %83 = load { double, %Array* }*, { double, %Array* }** %82, align 8 + %84 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %83, i32 0, i32 1 + %85 = load %Array*, %Array** %84, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %85, i32 -1) + %86 = bitcast { double, %Array* }* %83 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %87 = add i64 %79, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %68, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + ret %Array* %68 +} + +define internal %Array* @Microsoft__Quantum__Arrays___348fb71df7bc448eb164e2c392c91e93_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %9 = icmp slt i64 %0, %8 + br i1 %9, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %8, %condFalse__1 ] + %10 = icmp eq i64 %nElements, 0 + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %12 = sub i64 %0, 1 + br label %header__2 + +continue__1: ; preds = %condContinue__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %14 = bitcast i8* %13 to %Array** + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %17 = bitcast i8* %16 to double* + %18 = load double, double* %17, align 8 + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double }* getelementptr ({ %Array*, double }, { %Array*, double }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { %Array*, double }* + %21 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %20, i32 0, i32 1 + store %Array* %15, %Array** %21, align 8 + store double %18, double* %22, align 8 + %23 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %24 = sub i64 %nElements, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %25 = phi i64 [ 0, %then0__1 ], [ %30, %exiting__2 ] + %26 = icmp sle i64 %25, %12 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %25) + %28 = bitcast i8* %27 to %Array** + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %11 + +header__3: ; preds = %exiting__3, %continue__1 + %31 = phi i64 [ 0, %continue__1 ], [ %36, %exiting__3 ] + %32 = icmp sle i64 %31, %24 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %31) + %34 = bitcast i8* %33 to { %Array*, double }** + store { %Array*, double }* %20, { %Array*, double }** %34, align 8 + %35 = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %35, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %36 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %23, %Array** %output, align 8 + %37 = sub i64 %nElements, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %38 = phi i64 [ 0, %exit__3 ], [ %46, %exiting__4 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %40 = 
call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %38) + %41 = bitcast i8* %40 to { %Array*, double }** + %42 = load { %Array*, double }*, { %Array*, double }** %41, align 8 + %43 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %42, i32 0, i32 0 + %44 = load %Array*, %Array** %43, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 1) + %45 = bitcast { %Array*, double }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %46 = add i64 %38, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %47 = sub i64 %nElements, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idxElement = phi i64 [ 1, %exit__4 ], [ %67, %exiting__5 ] + %48 = icmp sle i64 %idxElement, %47 + br i1 %48, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + %50 = call %Array* @__quantum__rt__array_copy(%Array* %49, i1 false) + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %52 = bitcast i8* %51 to %Array** + %53 = load %Array*, %Array** %52, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 1) + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %55 = bitcast i8* %54 to double* + %56 = load double, double* %55, align 8 + %57 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double }* getelementptr ({ %Array*, double }, { %Array*, double }* null, i32 1) to i64)) + %58 = bitcast %Tuple* %57 to { %Array*, double }* + %59 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %58, i32 0, i32 0 + %60 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %58, i32 0, i32 1 + store %Array* %53, %Array** %59, align 8 + store double %56, double* %60, align 8 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 %idxElement) + %62 = bitcast i8* %61 to { %Array*, double }** + call void @__quantum__rt__array_update_alias_count(%Array* %53, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 1) + %63 = load { %Array*, double }*, { %Array*, double }** %62, align 8 + %64 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %63, i32 0, i32 0 + %65 = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 -1) + %66 = bitcast { %Array*, double }* %63 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %66, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %66, i32 -1) + store { %Array*, double }* %58, { %Array*, double }** %62, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 1) + store %Array* %50, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %67 = add i64 %idxElement, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %68 = load %Array*, %Array** %output, align 8 + %69 = load %Array*, %Array** %21, align 8 + %70 = sub i64 %0, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %71 = phi i64 [ 0, 
%exit__5 ], [ %76, %exiting__6 ] + %72 = icmp sle i64 %71, %70 + br i1 %72, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %73 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %71) + %74 = bitcast i8* %73 to %Array** + %75 = load %Array*, %Array** %74, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %75, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %76 = add i64 %71, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %77 = call i64 @__quantum__rt__array_get_size_1d(%Array* %68) + %78 = sub i64 %77, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %79 = phi i64 [ 0, %exit__6 ], [ %87, %exiting__7 ] + %80 = icmp sle i64 %79, %78 + br i1 %80, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %68, i64 %79) + %82 = bitcast i8* %81 to { %Array*, double }** + %83 = load { %Array*, double }*, { %Array*, double }** %82, align 8 + %84 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %83, i32 0, i32 0 + %85 = load %Array*, %Array** %84, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %85, i32 -1) + %86 = bitcast { %Array*, double }* %83 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %87 = add i64 %79, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %68, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + ret %Array* %68 +} + +define internal %Array* @Microsoft__Quantum__Arrays___8865ac95eaf34017a6ca070332d1d0d3_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %2 = icmp slt i64 %0, %1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = icmp eq i64 %nElements, 0 + br i1 %3, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %4 + +continue__1: ; preds = %condContinue__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %6 = bitcast i8* %5 to i64* + %7 = load i64, i64* %6, align 4 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %9 = bitcast i8* %8 to i2* + %10 = load i2, i2* %9, align 1 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i2 }* getelementptr ({ i64, i2 }, { i64, i2 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i64, i2 
}* + %13 = getelementptr inbounds { i64, i2 }, { i64, i2 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i64, i2 }, { i64, i2 }* %12, i32 0, i32 1 + store i64 %7, i64* %13, align 4 + store i2 %10, i2* %14, align 1 + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %16 = sub i64 %nElements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %17 = phi i64 [ 0, %continue__1 ], [ %21, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %17) + %20 = bitcast i8* %19 to { i64, i2 }** + store { i64, i2 }* %12, { i64, i2 }** %20, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %15, %Array** %output, align 8 + %22 = sub i64 %nElements, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %23) + %26 = bitcast i8* %25 to { i64, i2 }** + %27 = load { i64, i2 }*, { i64, i2 }** %26, align 8 + %28 = bitcast { i64, i2 }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %30 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %48, %exiting__3 ] + %31 = icmp sle i64 %idxElement, %30 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %32 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %35 = bitcast i8* %34 to i64* + %36 = load i64, i64* %35, align 4 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %38 = bitcast i8* %37 to i2* + %39 = load i2, i2* %38, align 1 + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i2 }* getelementptr ({ i64, i2 }, { i64, i2 }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i64, i2 }* + %42 = getelementptr inbounds { i64, i2 }, { i64, i2 }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i64, i2 }, { i64, i2 }* %41, i32 0, i32 1 + store i64 %36, i64* %42, align 4 + store i2 %39, i2* %43, align 1 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxElement) + %45 = bitcast i8* %44 to { i64, i2 }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %46 = load { i64, i2 }*, { i64, i2 }** %45, align 8 + %47 = bitcast { i64, i2 }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { i64, i2 }* %41, { i64, i2 }** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** 
%output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { i64, i2 }** + %56 = load { i64, i2 }*, { i64, i2 }** %55, align 8 + %57 = bitcast { i64, i2 }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret %Array* %49 +} + +define internal %Array* @Microsoft__Quantum__Arrays___b053fb32724e40cbbc4050de14397ee6_ConstantArray__body(i64 %length, i2 %value) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %length) + %1 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2) + %5 = bitcast i8* %4 to i2* + store i2 %value, i2* %5, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + ret %Array* %0 +} + +define internal %Range @Microsoft__Quantum__Arrays___9130f13c46ab4727adf43d8d67c99cc2_IndexRange__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { i64, %Callable* }** + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %5, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { i64, %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %11 = sub i64 %0, 1 + %12 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %11, 2 + %13 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %14 = 
phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %15 = icmp sle i64 %14, %13 + br i1 %15, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %14) + %17 = bitcast i8* %16 to { i64, %Callable* }** + %18 = load { i64, %Callable* }*, { i64, %Callable* }** %17, align 8 + %19 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %18, i32 0, i32 1 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %20, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %20, i32 -1) + %21 = bitcast { i64, %Callable* }* %18 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %14, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %12 +} + +define internal %Range @Microsoft__Quantum__Arrays___f06ffffa287a47609acf501f61b4d290_IndexRange__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %2 +} + +define internal i1 @Microsoft__Quantum__Arrays___7ba2bd7c451647258703e1788550e293_IsEmpty__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { { double, double }*, %Array* }** + %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0 + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %14 = icmp eq i64 %0, 0 + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %16) + %19 = bitcast i8* %18 to { { double, double }*, %Array* }** + %20 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %19, align 8 + %21 = getelementptr 
inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %20, i32 0, i32 0 + %22 = load { double, double }*, { double, double }** %21, align 8 + %23 = bitcast { double, double }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %23, i32 -1) + %24 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %20, i32 0, i32 1 + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + %26 = bitcast { { double, double }*, %Array* }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret i1 %14 +} + +define internal { i64, double, i1 }* @Microsoft__Quantum__Math____QsRef1__ExtendedTruncation____body(double %value) { +entry: + %truncated = fptosi double %value to i64 + %0 = sitofp i64 %truncated to double + %1 = fsub double %0, %value + %2 = fcmp oge double %value, 0.000000e+00 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, i1 }* getelementptr ({ i64, double, i1 }, { i64, double, i1 }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i64, double, i1 }* + %5 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %4, i32 0, i32 2 + store i64 %truncated, i64* %5, align 4 + store double %1, double* %6, align 8 + store i1 %2, i1* %7, align 1 + ret { i64, double, i1 }* %4 +} + +define internal double @Microsoft__Quantum__Math__AbsComplex__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %d = call double @Microsoft__Quantum__Math__AbsSquaredComplex__body({ double, double }* %input) + %1 = call double @__quantum__qis__sqrt__body(double %d) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %1 +} + +define internal double @Microsoft__Quantum__Math__AbsSquaredComplex__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 0 + %real = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 1 + %imaginary = load double, double* %2, align 8 + %3 = fmul double %real, %real + %4 = fmul double %imaginary, %imaginary + %5 = fadd double %3, %4 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %5 +} + +declare double @__quantum__qis__sqrt__body(double) + +define internal double @Microsoft__Quantum__Math__ArcTan2__body(double %y, double %x) { +entry: + %0 = call double @__quantum__qis__arctan2__body(double %y, double %x) + ret double %0 +} + +declare double @__quantum__qis__arctan2__body(double, double) + +define internal double @Microsoft__Quantum__Math__ArgComplex__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = getelementptr 
inbounds { double, double }, { double, double }* %input, i32 0, i32 0 + %real = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 1 + %imaginary = load double, double* %2, align 8 + %3 = call double @__quantum__qis__arctan2__body(double %imaginary, double %real) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %3 +} + +define internal double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 1 + %2 = load double, double* %1, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %2 +} + +define internal { double, double }* @Microsoft__Quantum__Math__Complex__body(double %Real, double %Imag) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { double, double }* + %2 = getelementptr inbounds { double, double }, { double, double }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { double, double }, { double, double }* %1, i32 0, i32 1 + store double %Real, double* %2, align 8 + store double %Imag, double* %3, align 8 + ret { double, double }* %1 +} + +define internal { double, double }* @Microsoft__Quantum__Math__ComplexAsComplexPolar__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = call double @Microsoft__Quantum__Math__AbsComplex__body({ double, double }* %input) + %2 = call double @Microsoft__Quantum__Math__ArgComplex__body({ double, double }* %input) + %3 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %1, double %2) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret { double, double }* %3 +} + +define internal { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %Magnitude, double %Argument) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { double, double }* + %2 = getelementptr inbounds { double, double }, { double, double }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { double, double }, { double, double }* %1, i32 0, i32 1 + store double %Magnitude, double* %2, align 8 + store double %Argument, double* %3, align 8 + ret { double, double }* %1 +} + +define internal double @Microsoft__Quantum__Math__Lg__body(double %input) { +entry: + %0 = call double @__quantum__qis__log__body(double %input) + %1 = call double @Microsoft__Quantum__Math__LogOf2__body() + %2 = fdiv double %0, %1 + ret double %2 +} + +declare double @__quantum__qis__log__body(double) + +define internal double @Microsoft__Quantum__Math__LogOf2__body() { +entry: + ret double 0x3FE62E42FEFA39EF +} + +define internal double @Microsoft__Quantum__Math__Log__body(double %input) { +entry: + %0 = call double @__quantum__qis__log__body(double %input) + ret double %0 +} + +define internal i64 @Microsoft__Quantum__Math__Max__body(%Array* %values) { +entry: + %max = alloca i64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* 
%values, i32 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 0) + %1 = bitcast i8* %0 to i64* + %2 = load i64, i64* %1, align 4 + store i64 %2, i64* %max, align 4 + %nTerms = call i64 @__quantum__rt__array_get_size_1d(%Array* %values) + %3 = sub i64 %nTerms, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %4 = icmp sle i64 %idx, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %idx) + %6 = bitcast i8* %5 to i64* + %7 = load i64, i64* %6, align 4 + %8 = load i64, i64* %max, align 4 + %9 = icmp sgt i64 %7, %8 + br i1 %9, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %idx) + %11 = bitcast i8* %10 to i64* + %12 = load i64, i64* %11, align 4 + store i64 %12, i64* %max, align 4 + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %13 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %14 = load i64, i64* %max, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 -1) + ret i64 %14 +} + +define internal i64 @Microsoft__Quantum__Math__Min__body(%Array* %values) { +entry: + %min = alloca i64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 0) + %1 = bitcast i8* %0 to i64* + %2 = load i64, i64* %1, align 4 + store i64 %2, i64* %min, align 4 + %nTerms = call i64 @__quantum__rt__array_get_size_1d(%Array* %values) + %3 = sub i64 %nTerms, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %4 = icmp sle i64 %idx, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %idx) + %6 = bitcast i8* %5 to i64* + %7 = load i64, i64* %6, align 4 + %8 = load i64, i64* %min, align 4 + %9 = icmp slt i64 %7, %8 + br i1 %9, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %idx) + %11 = bitcast i8* %10 to i64* + %12 = load i64, i64* %11, align 4 + store i64 %12, i64* %min, align 4 + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %13 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %14 = load i64, i64* %min, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 -1) + ret i64 %14 +} + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare double @llvm.pow.f64(double, double) #0 + +define internal double @Microsoft__Quantum__Math__Sqrt__body(double %d) { +entry: + %0 = call double @__quantum__qis__sqrt__body(double %d) + ret double %0 +} + +define internal %Array* @Microsoft__Quantum__Convert__BoolArrayAsPauli__body(i2 %pauli, i1 %bitApply, %Array* %bits) { +entry: + %paulis = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %nBits = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %nBits) + %1 = sub i64 %nBits, 1 + 
br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2) + %5 = bitcast i8* %4 to i2* + store i2 0, i2* %5, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %0, %Array** %paulis, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 1) + %7 = sub i64 %nBits, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idxBit = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %8 = icmp sle i64 %idxBit, %7 + br i1 %8, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %9 = load %Array*, %Array** %paulis, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 -1) + %10 = call %Array* @__quantum__rt__array_copy(%Array* %9, i1 false) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bits, i64 %idxBit) + %12 = bitcast i8* %11 to i1* + %13 = load i1, i1* %12, align 1 + %14 = icmp eq i1 %13, %bitApply + %15 = select i1 %14, i2 %pauli, i2 0 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %idxBit) + %17 = bitcast i8* %16 to i2* + store i2 %15, i2* %17, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + store %Array* %10, %Array** %paulis, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %18 = add i64 %idxBit, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %19 = load %Array*, %Array** %paulis, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 -1) + ret %Array* %19 +} + +define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyGlobalRotationStep____body(double %angle, i64 %idxTarget, %Array* %register) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 0, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %3 = bitcast i8* %2 to %Qubit** + %4 = load %Qubit*, %Qubit** %3, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %6 = bitcast i8* %5 to %Qubit** + store %Qubit* %4, %Qubit** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %angle, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Preparation____QsRef2__ApplyGlobalRotationStep____adj(double %angle, i64 %idxTarget, %Array* %register) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1)
+  %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1)
+  %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0)
+  %1 = bitcast i8* %0 to i2*
+  store i2 0, i2* %1, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1)
+  %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget)
+  %3 = bitcast i8* %2 to %Qubit**
+  %4 = load %Qubit*, %Qubit** %3, align 8
+  %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0)
+  %6 = bitcast i8* %5 to %Qubit**
+  store %Qubit* %4, %Qubit** %6, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  call void @__quantum__qis__exp__adj(%Array* %paulis, double %angle, %Array* %qubits)
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyGlobalRotationStep____ctl(%Array* %__controlQubits__, { double, i64, %Array* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 0
+  %angle = load double, double* %1, align 8
+  %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1
+  %idxTarget = load i64, i64* %2, align 4
+  %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2
+  %register = load %Array*, %Array** %3, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1)
+  %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0)
+  %5 = bitcast i8* %4 to i2*
+  store i2 0, i2* %5, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1)
+  %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget)
+  %7 = bitcast i8* %6 to %Qubit**
+  %8 = load %Qubit*, %Qubit** %7, align 8
+  %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0)
+  %10 = bitcast i8* %9 to %Qubit**
+  store %Qubit* %8, %Qubit** %10, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1)
+  %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64))
+  %12 = bitcast %Tuple* %11 to { %Array*, double, %Array* }*
+  %13 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 0
+  %14 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 1
+  %15 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 2
+  store %Array* %paulis, %Array** %13, align 8
+  store double %angle, double* %14, align 8
+  store %Array* %qubits, %Array** %15, align 8
+  call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %12)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyGlobalRotationStep____ctladj(%Array* %__controlQubits__, { double, i64, %Array* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 0
+  %angle = load double, double* %1, align 8
+  %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1
+  %idxTarget = load i64, i64* %2, align 4
+  %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2
+  %register = load %Array*, %Array** %3, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1)
+  %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0)
+  %5 = bitcast i8* %4 to i2*
+  store i2 0, i2* %5, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1)
+  %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget)
+  %7 = bitcast i8* %6 to %Qubit**
+  %8 = load %Qubit*, %Qubit** %7, align 8
+  %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0)
+  %10 = bitcast i8* %9 to %Qubit**
+  store %Qubit* %8, %Qubit** %10, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1)
+  %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64))
+  %12 = bitcast %Tuple* %11 to { %Array*, double, %Array* }*
+  %13 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 0
+  %14 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 1
+  %15 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 2
+  store %Array* %paulis, %Array** %13, align 8
+  store double %angle, double* %14, align 8
+  store %Array* %qubits, %Array** %15, align 8
+  call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %12)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____body(double %tolerance, %Array* %disentangling, i2 %axis, { %Range, i64 }* %0, %Array* %register) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1)
+  %1 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 0
+  %rngControl = load %Range, %Range* %1, align 4
+  %2 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 1
+  %idxTarget = load i64, i64* %2, align 4
+  %3 = extractvalue %Range %rngControl, 0
+  %4 = extractvalue %Range %rngControl, 1
+  %5 = extractvalue %Range %rngControl, 2
+  %6 = insertvalue %Range zeroinitializer, i64 %3, 0
+  %7 = insertvalue %Range %6, i64 %4, 1
+  %8 = insertvalue %Range %7, i64 %5, 2
+  %9 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %8, i1 true)
+  %actualControl = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %9)
+  %10 = getelementptr inbounds { %Array* }, { %Array* }* %actualControl, i32 0, i32 0
+  %11 = load %Array*, %Array** %10, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1)
+  %12 = bitcast { %Array* }* %actualControl to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1)
+  %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget)
+  %14 = bitcast i8* %13 to %Qubit**
+  %15 = load %Qubit*, %Qubit** %14, align 8
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body(double %tolerance, %Array* %disentangling, i2 %axis, { %Array* }* %actualControl, %Qubit* %15)
+  call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____adj(double %tolerance, %Array* %disentangling, i2 %axis, { %Range, i64 }* %0, %Array* %register) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1)
+  %1 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 0
+  %rngControl = load %Range, %Range* %1, align 4
+  %2 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 1
+  %idxTarget = load i64, i64* %2, align 4
+  %3 = extractvalue %Range %rngControl, 0
+  %4 = extractvalue %Range %rngControl, 1
+  %5 = extractvalue %Range %rngControl, 2
+  %6 = insertvalue %Range zeroinitializer, i64 %3, 0
+  %7 = insertvalue %Range %6, i64 %4, 1
+  %8 = insertvalue %Range %7, i64 %5, 2
+  %9 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %8, i1 true)
+  %__qsVar0__actualControl__ = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %9)
+  %10 = getelementptr inbounds { %Array* }, { %Array* }* %__qsVar0__actualControl__, i32 0, i32 0
+  %11 = load %Array*, %Array** %10, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1)
+  %12 = bitcast { %Array* }* %__qsVar0__actualControl__ to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1)
+  %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget)
+  %14 = bitcast i8* %13 to %Qubit**
+  %15 = load %Qubit*, %Qubit** %14, align 8
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj(double %tolerance, %Array* %disentangling, i2 %axis, { %Array* }* %__qsVar0__actualControl__, %Qubit* %15)
+  call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____ctl(%Array* %__controlQubits__, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0
+  %tolerance = load double, double* %1, align 8
+  %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1
+  %disentangling = load %Array*, %Array** %2, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1)
+  %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2
+  %axis = load i2, i2* %3, align 1
+  %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3
+  %5 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8
+  %6 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4
+  %register = load %Array*, %Array** %6, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1)
+  %7 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 0
+  %rngControl = load %Range, %Range* %7, align 4
+  %8 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 1
+  %idxTarget = load i64, i64* %8, align 4
+  %9 = extractvalue %Range %rngControl, 0
+  %10 = extractvalue %Range %rngControl, 1
+  %11 = extractvalue %Range %rngControl, 2
+  %12 = insertvalue %Range zeroinitializer, i64 %9, 0
+  %13 = insertvalue %Range %12, i64 %10, 1
+  %14 = insertvalue %Range %13, i64 %11, 2
+  %15 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %14, i1 true)
+  %actualControl = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %15)
+  %16 = getelementptr inbounds { %Array* }, { %Array* }* %actualControl, i32 0, i32 0
+  %17 = load %Array*, %Array** %16, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1)
+  %18 = bitcast { %Array* }* %actualControl to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 1)
+  %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget)
+  %20 = bitcast i8* %19 to %Qubit**
+  %21 = load %Qubit*, %Qubit** %20, align 8
+  %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %23 = bitcast %Tuple* %22 to { double, %Array*, i2, { %Array* }*, %Qubit* }*
+  %24 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 0
+  %25 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 1
+  %26 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 2
+  %27 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 3
+  %28 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 4
+  store double %tolerance, double* %24, align 8
+  store %Array* %disentangling, %Array** %25, align 8
+  store i2 %axis, i2* %26, align 1
+  store { %Array* }* %actualControl, { %Array* }** %27, align 8
+  store %Qubit* %21, %Qubit** %28, align 8
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____ctladj(%Array* %__controlQubits__, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0
+  %tolerance = load double, double* %1, align 8
+  %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1
+  %disentangling = load %Array*, %Array** %2, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1)
+  %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2
+  %axis = load i2, i2* %3, align 1
+  %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3
+  %5 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8
+  %6 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4
+  %register = load %Array*, %Array** %6, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1)
+  %7 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 0
+  %rngControl = load %Range, %Range* %7, align 4
+  %8 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 1
+  %idxTarget = load i64, i64* %8, align 4
+  %9 = extractvalue %Range %rngControl, 0
+  %10 = extractvalue %Range %rngControl, 1
+  %11 = extractvalue %Range %rngControl, 2
+  %12 = insertvalue %Range zeroinitializer, i64 %9, 0
+  %13 = insertvalue %Range %12, i64 %10, 1
+  %14 = insertvalue %Range %13, i64 %11, 2
+  %15 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %14, i1 true)
+  %__qsVar0__actualControl__ = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %15)
+  %16 = getelementptr inbounds { %Array* }, { %Array* }* %__qsVar0__actualControl__, i32 0, i32 0
+  %17 = load %Array*, %Array** %16, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1)
+  %18 = bitcast { %Array* }* %__qsVar0__actualControl__ to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 1)
+  %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget)
+  %20 = bitcast i8* %19 to %Qubit**
+  %21 = load %Qubit*, %Qubit** %20, align 8
+  %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %23 = bitcast %Tuple* %22 to { double, %Array*, i2, { %Array* }*, %Qubit* }*
+  %24 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 0
+  %25 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 1
+  %26 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 2
+  %27 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 3
+  %28 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 4
+  store double %tolerance, double* %24, align 8
+  store %Array* %disentangling, %Array** %25, align 8
+  store i2 %axis, i2* %26, align 1
+  store { %Array* }* %__qsVar0__actualControl__, { %Array* }** %27, align 8
+  store %Qubit* %21, %Qubit** %28, align 8
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyToLittleEndian____body(%Callable* %bareOp, { %Array* }* %register) {
+entry:
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1)
+  %0 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0
+  %1 = load %Array*, %Array** %0, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1)
+  %2 = bitcast { %Array* }* %register to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1)
+  %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64))
+  %4 = bitcast %Tuple* %3 to { %Array* }*
+  %5 = getelementptr inbounds { %Array* }, { %Array* }* %4, i32 0, i32 0
+  store %Array* %1, %Array** %5, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %bareOp, %Tuple* %3, %Tuple* null)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyToLittleEndian____adj(%Callable* %bareOp, { %Array* }* %register) {
+entry:
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1)
+  %0 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0
+  %1 = load %Array*, %Array** %0, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1)
+  %2 = bitcast { %Array* }* %register to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1)
+  %3 = call %Callable* @__quantum__rt__callable_copy(%Callable* %bareOp, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %3)
+  %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64))
+  %5 = bitcast %Tuple* %4 to { %Array* }*
+  %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0
+  store %Array* %1, %Array** %6, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %3, %Tuple* %4, %Tuple* null)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyToLittleEndian____ctl(%Array* %__controlQubits__, { %Callable*, { %Array* }* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0
+  %bareOp = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1)
+  %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1
+  %register = load { %Array* }*, { %Array* }** %2, align 8
+  %3 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0
+  %4 = load %Array*, %Array** %3, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1)
+  %5 = bitcast { %Array* }* %register to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1)
+  %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %bareOp, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %6)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1)
+  %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64))
+  %8 = bitcast %Tuple* %7 to { %Array*, %Array* }*
+  %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0
+  %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1
+  store %Array* %__controlQubits__, %Array** %9, align 8
+  store %Array* %4, %Array** %10, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyToLittleEndian____ctladj(%Array* %__controlQubits__, { %Callable*, { %Array* }* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0
+  %bareOp = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1)
+  %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1
+  %register = load { %Array* }*, { %Array* }** %2, align 8
+  %3 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0
+  %4 = load %Array*, %Array** %3, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1)
+  %5 = bitcast { %Array* }* %register to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1)
+  %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %bareOp, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %6)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %6)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1)
+  %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64))
+  %8 = bitcast %Tuple* %7 to { %Array*, %Array* }*
+  %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0
+  %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1
+  store %Array* %__controlQubits__, %Array** %9, align 8
+  store %Array* %4, %Array** %10, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1)
+  ret void
+}
+
+define internal %Array* @Microsoft__Quantum__Preparation____QsRef2__ApproximatelyUnprepareArbitraryStatePlan____body(double %tolerance, %Array* %coefficients, { %Range, i64 }* %0) {
+entry:
+  %plan = alloca %Array*, align 8
+  %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients)
+  %2 = sub i64 %1, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %3 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ]
+  %4 = icmp sle i64 %3, %2
+  br i1 %4, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %3)
+  %6 = bitcast i8* %5 to { double, double }**
+  %7 = load { double, double }*, { double, double }** %6, align 8
+  %8 = bitcast { double, double }* %7 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %9 = add i64 %3, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+  %10 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 0
+  %rngControl = load %Range, %Range* %10, align 4
+  %11 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 1
+  %idxTarget = load i64, i64* %11, align 4
+  %12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0)
+  store %Array* %12, %Array** %plan, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1)
+  %13 = call { %Array*, %Array*, %Array* }* @Microsoft__Quantum__Preparation____QsRef2__StatePreparationSBMComputeCoefficients____body(%Array* %coefficients)
+  %14 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %13, i32 0, i32 0
+  %disentanglingY = load %Array*, %Array** %14, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingY, i32 1)
+  %15 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %13, i32 0, i32 1
+  %disentanglingZ = load %Array*, %Array** %15, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingZ, i32 1)
+  %16 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %13, i32 0, i32 2
+  %newCoefficients = load %Array*, %Array** %16, align 8
+  %17 = call i64 @__quantum__rt__array_get_size_1d(%Array* %newCoefficients)
+  %18 = sub i64 %17, 1
+  br label %header__2
+
+header__2: ; preds = %exiting__2, %exit__1
+  %19 = phi i64 [ 0, %exit__1 ], [ %25, %exiting__2 ]
+  %20 = icmp sle i64 %19, %18
+  br i1 %20, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+  %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 %19)
+  %22 = bitcast i8* %21 to { double, double }**
+  %23 = load { double, double }*, { double, double }** %22, align 8
+  %24 = bitcast { double, double }* %23 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 1)
+  br label %exiting__2
+
+exiting__2: ; preds = %body__2
+  %25 = add i64 %19, 1
+  br label %header__2
+
+exit__2: ; preds = %header__2
+  call void @__quantum__rt__array_update_alias_count(%Array* %newCoefficients, i32 1)
+  %26 = call i1 @Microsoft__Quantum__Canon____QsRef2__AnyOutsideToleranceD____body(double %tolerance, %Array* %disentanglingZ)
+  br i1 %26, label %then0__1, label %continue__1
+
+then0__1: ; preds = %exit__2
+  %27 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingZ, i32 1)
+  %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, %Range, i64 }* getelementptr ({ %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* null, i32 1) to i64))
+  %29 = bitcast %Tuple* %28 to { %Callable*, double, %Array*, i2, %Range, i64 }*
+  %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 0
+  %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 1
+  %32 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 2
+  %33 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 3
+  %34 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 4
+  %35 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 5
+  store %Callable* %27, %Callable** %30, align 8
+  store double %tolerance, double* %31, align 8
+  store %Array* %disentanglingZ, %Array** %32, align 8
+  store i2 -2, i2* %33, align 1
+  store %Range %rngControl, %Range* %34, align 4
+  store i64 %idxTarget, i64* %35, align 4
+  %36 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__28__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__17__FunctionTable, %Tuple* %28)
+  %37 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 0)
+  %39 = bitcast i8* %38 to %Callable**
+  store %Callable* %36, %Callable** %39, align 8
+  %40 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %40, i64 0)
+  %42 = bitcast i8* %41 to %Callable**
+  store %Callable* %36, %Callable** %42, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 1)
+  br label %header__3
+
+continue__1: ; preds = %exit__4, %exit__2
+  %43 = call i1 @Microsoft__Quantum__Canon____QsRef2__AnyOutsideToleranceD____body(double %tolerance, %Array* %disentanglingY)
+  br i1 %43, label %then0__2, label %continue__2
+
+then0__2: ; preds = %continue__1
+  %44 = load %Array*, %Array** %plan, align 8
+  %45 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingY, i32 1)
+  %46 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, %Range, i64 }* getelementptr ({ %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* null, i32 1) to i64))
+  %47 = bitcast %Tuple* %46 to { %Callable*, double, %Array*, i2, %Range, i64 }*
+  %48 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 0
+  %49 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 1
+  %50 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 2
+  %51 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 3
+  %52 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 4
+  %53 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 5
+  store %Callable* %45, %Callable** %48, align 8
+  store double %tolerance, double* %49, align 8
+  store %Array* %disentanglingY, %Array** %50, align 8
+  store i2 -1, i2* %51, align 1
+  store %Range %rngControl, %Range* %52, align 4
+  store i64 %idxTarget, i64* %53, align 4
+  %54 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__29__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__17__FunctionTable, %Tuple* %46)
+  %55 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 0)
+  %57 = bitcast i8* %56 to %Callable**
+  store %Callable* %54, %Callable** %57, align 8
+  %58 = call %Array* @__quantum__rt__array_concatenate(%Array* %44, %Array* %55)
+  %59 = call i64 @__quantum__rt__array_get_size_1d(%Array* %58)
+  %60 = sub i64 %59, 1
+  br label %header__5
+
+continue__2: ; preds = %exit__9, %continue__1
+  %61 = call i1 @Microsoft__Quantum__Canon__IsRangeEmpty__body(%Range %rngControl)
+  br i1 %61, label %then0__3, label %test1__1
+
+then0__3: ; preds = %continue__2
+  %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 0)
+  %63 = bitcast i8* %62 to { double, double }**
+  %64 = load { double, double }*, { double, double }** %63, align 8
+  %65 = getelementptr inbounds { double, double }, { double, double }* %64, i32 0, i32 0
+  %abs = load double, double* %65, align 8
+  %66 = getelementptr inbounds { double, double }, { double, double }* %64, i32 0, i32 1
+  %arg = load double, double* %66, align 8
+  %67 = call double @Microsoft__Quantum__Math__AbsD__body(double %arg)
+  %68 = fcmp ogt double %67, %tolerance
+  br i1 %68, label %then0__4, label %continue__4
+
+then0__4: ; preds = %then0__3
+  %69 = load %Array*, %Array** %plan, align 8
+  %70 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef2__ApplyGlobalRotationStep____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  %71 = fmul double -1.000000e+00, %arg
+  %72 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, i64 }* getelementptr ({ %Callable*, double, i64 }, { %Callable*, double, i64 }* null, i32 1) to i64))
+  %73 = bitcast %Tuple* %72 to { %Callable*, double, i64 }*
+  %74 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %73, i32 0, i32 0
+  %75 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %73, i32 0, i32 1
+  %76 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %73, i32 0, i32 2
+  store %Callable* %70, %Callable** %74, align 8
+  store double %71, double* %75, align 8
+  store i64 %idxTarget, i64* %76, align 4
+  %77 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__30__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__18__FunctionTable, %Tuple* %72)
+  %78 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 0)
+  %80 = bitcast i8* %79 to %Callable**
+  store %Callable* %77, %Callable** %80, align 8
+  %81 = call %Array* @__quantum__rt__array_concatenate(%Array* %69, %Array* %78)
+  %82 = call i64 @__quantum__rt__array_get_size_1d(%Array* %81)
+  %83 = sub i64 %82, 1
+  br label %header__10
+
+continue__4: ; preds = %exit__14, %then0__3
+  br label %continue__3
+
+test1__1: ; preds = %continue__2
+  %84 = call i1 @Microsoft__Quantum__Canon____QsRef2__AnyOutsideToleranceCP____body(double %tolerance, %Array* %newCoefficients)
+  br i1 %84, label %then1__1, label %continue__3
+
+then1__1: ; preds = %test1__1
+  %85 = extractvalue %Range %rngControl, 0
+  %86 = extractvalue %Range %rngControl, 1
+  %87 = extractvalue %Range %rngControl, 2
+  %88 = add i64 %85, 1
+  %89 = extractvalue %Range %rngControl, 0
+  %90 = extractvalue %Range %rngControl, 1
+  %91 = extractvalue %Range %rngControl, 2
+  %92 = extractvalue %Range %rngControl, 0
+  %93 = extractvalue %Range %rngControl, 1
+  %94 = extractvalue %Range %rngControl, 2
+  %95 = insertvalue %Range zeroinitializer, i64 %88, 0
+  %96 = insertvalue %Range %95, i64 %90, 1
+  %newControl = insertvalue %Range %96, i64 %94, 2
+  %newTarget = extractvalue %Range %rngControl, 0
+  %97 = extractvalue %Range %rngControl, 1
+  %98 = extractvalue %Range %rngControl, 2
+  %99 = load %Array*, %Array** %plan, align 8
+  %100 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64))
+  %101 = bitcast %Tuple* %100 to { %Range, i64 }*
+  %102 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %101, i32 0, i32 0
+  %103 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %101, i32 0, i32 1
+  store %Range %newControl, %Range* %102, align 4
+  store i64 %newTarget, i64* %103, align 4
+  %104 = call %Array* @Microsoft__Quantum__Preparation____QsRef2__ApproximatelyUnprepareArbitraryStatePlan____body(double %tolerance, %Array* %newCoefficients, { %Range, i64 }* %101)
+  %105 = call %Array* @__quantum__rt__array_concatenate(%Array* %99, %Array* %104)
+  %106 = call i64 @__quantum__rt__array_get_size_1d(%Array* %105)
+  %107 = sub i64 %106, 1
+  br label %header__15
+
+continue__3: ; preds = %exit__19, %test1__1, %continue__4
+  %108 = load %Array*, %Array** %plan, align 8
+  %109 = sub i64 %1, 1
+  br label %header__20
+
+header__3: ; preds = %exiting__3, %then0__1
+  %110 = phi i64 [ 0, %then0__1 ], [ %115, %exiting__3 ]
+  %111 = icmp sle i64 %110, 0
+  br i1 %111, label %body__3, label %exit__3
+
+body__3: ; preds = %header__3
+  %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %40, i64 %110)
+  %113 = bitcast i8* %112 to %Callable**
+  %114 = load %Callable*, %Callable** %113, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %114, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %114, i32 1)
+  br label %exiting__3
+
+exiting__3: ; preds = %body__3
+  %115 = add i64 %110, 1
+  br label %header__3
+
+exit__3: ; preds = %header__3
+  call void @__quantum__rt__array_update_alias_count(%Array* %40, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1)
+  store %Array* %40, %Array** %plan, align 8
+  br label %header__4
+
+header__4: ; preds = %exiting__4, %exit__3
+  %116 = phi i64 [ 0, %exit__3 ], [ %121, %exiting__4 ]
+  %117 = icmp sle i64 %116, 0
+  br i1 %117, label %body__4, label %exit__4
+
+body__4: ; preds = %header__4
+  %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 %116)
+  %119 = bitcast i8* %118 to %Callable**
+  %120 = load %Callable*, %Callable** %119, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %120, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %120, i32 -1)
+  br label %exiting__4
+
+exiting__4: ; preds = %body__4
+  %121 = add i64 %116, 1
+  br label %header__4
+
+exit__4: ; preds = %header__4
+  call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 -1)
+  br label %continue__1
+
+header__5: ; preds = %exiting__5, %then0__2
+  %122 = phi i64 [ 0, %then0__2 ], [ %127, %exiting__5 ]
+  %123 = icmp sle i64 %122, %60
+  br i1 %123, label %body__5, label %exit__5
+
+body__5: ; preds = %header__5
+  %124 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %58, i64 %122)
+  %125 = bitcast i8* %124 to %Callable**
+  %126 = load %Callable*, %Callable** %125, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %126, i32 1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %126, i32 1)
+  br label %exiting__5
+
+exiting__5: ; preds = %body__5
+  %127 = add i64 %122, 1
+  br label %header__5
+
+exit__5: ; preds = %header__5
+  call void @__quantum__rt__array_update_reference_count(%Array* %58, i32 1)
+  %128 = sub i64 %59, 1
+  br label %header__6
+
+header__6: ; preds = %exiting__6, %exit__5
+  %129 = phi i64 [ 0, %exit__5 ], [ %134, %exiting__6 ]
+  %130 = icmp sle i64 %129, %128
+  br i1 %130, label %body__6, label %exit__6
+
+body__6: ; preds = %header__6
+  %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %58, i64 %129)
+  %132 = bitcast i8* %131 to %Callable**
+  %133 = load %Callable*, %Callable** %132, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %133, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %133, i32 1)
+  br label %exiting__6
+
+exiting__6: ; preds = %body__6
+  %134 = add i64 %129, 1
+  br label %header__6
+
+exit__6: ; preds = %header__6
+  call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 1)
+  %135 = call i64 @__quantum__rt__array_get_size_1d(%Array* %44)
+  %136 = sub i64 %135, 1
+  br label %header__7
+
+header__7: ; preds = %exiting__7, %exit__6
+  %137 = phi i64 [ 0, %exit__6 ], [ %142, %exiting__7 ]
+  %138 = icmp sle i64 %137, %136
+  br i1 %138, label %body__7, label %exit__7
+
+body__7: ; preds = %header__7
+  %139 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 %137)
+  %140 = bitcast i8* %139 to %Callable**
+  %141 = load %Callable*, %Callable** %140, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %141, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %141, i32 -1)
+  br label %exiting__7
+
+exiting__7: ; preds = %body__7
+  %142 = add i64 %137, 1
+  br label %header__7
+
+exit__7: ; preds = %header__7
+  call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 -1)
+  %143 = sub i64 %135, 1
+  br label %header__8
+
+header__8: ; preds = %exiting__8, %exit__7
+  %144 = phi i64 [ 0, %exit__7 ], [ %149, %exiting__8 ]
+  %145 = icmp sle i64 %144, %143
+  br i1 %145, label %body__8, label %exit__8
+
+body__8: ; preds = %header__8
+  %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 %144)
+  %147 = bitcast i8* %146 to %Callable**
+  %148 = load %Callable*, %Callable** %147, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %148, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %148, i32 -1)
+  br label %exiting__8
+
+exiting__8: ; preds = %body__8
+  %149 = add i64 %144, 1
+  br label %header__8
+
+exit__8: ; preds = %header__8
+  call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 -1)
+  store %Array* %58, %Array** %plan, align 8
+  br label %header__9
+
+header__9: ; preds = %exiting__9, %exit__8
+  %150 = phi i64 [ 0, %exit__8 ], [ %155, %exiting__9 ]
+  %151 = icmp sle i64 %150, 0
+  br i1 %151, label %body__9, label %exit__9
+
+body__9: ; preds = %header__9
+  %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 %150)
+  %153 = bitcast i8* %152 to %Callable**
+  %154 = load %Callable*, %Callable** %153, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %154, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %154, i32 -1)
+  br label %exiting__9
+
+exiting__9: ; preds = %body__9
+  %155 = add i64 %150, 1
+  br label %header__9
+
+exit__9: ; preds = %header__9
+  call void @__quantum__rt__array_update_reference_count(%Array* %55, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %58, i32 -1)
+  br label %continue__2
+
+header__10: ; preds = %exiting__10, %then0__4
+  %156 = phi i64 [ 0, %then0__4 ], [ %161, %exiting__10 ]
+  %157 = icmp sle i64 %156, %83
+  br i1 %157, label %body__10, label %exit__10
+
+body__10: ; preds = %header__10
+  %158 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %81, i64 %156)
+  %159 = bitcast i8* %158 to %Callable**
+  %160 = load %Callable*, %Callable** %159, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %160, i32 1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %160, i32 1)
+  br label %exiting__10
+
+exiting__10: ; preds = %body__10
+  %161 = add i64 %156, 1
+  br label %header__10
+
+exit__10: ; preds = %header__10
+  call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 1)
+  %162 = sub i64 %82, 1
+  br label %header__11
+
+header__11: ; preds = %exiting__11, %exit__10
+  %163 = phi i64 [ 0, %exit__10 ], [ %168, %exiting__11 ]
+  %164 = icmp sle i64 %163, %162
+  br i1 %164, label %body__11, label %exit__11
+
+body__11: ; preds = %header__11
+  %165 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %81, i64 %163)
+  %166 = bitcast i8* %165 to %Callable**
+  %167 = load %Callable*, %Callable** %166, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %167, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %167, i32 1)
+  br label %exiting__11
+
+exiting__11: ; preds = %body__11
+  %168 = add i64 %163, 1
+  br label %header__11
+
+exit__11: ; preds = %header__11
+  call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 1)
+  %169 = call i64 @__quantum__rt__array_get_size_1d(%Array* %69)
+  %170 = sub i64 %169, 1
+  br label %header__12
+
+header__12: ; preds = %exiting__12, %exit__11
+  %171 = phi i64 [ 0, %exit__11 ], [ %176, %exiting__12 ]
+  %172 = icmp sle i64 %171, %170
+  br i1 %172, label %body__12, label %exit__12
+
+body__12: ; preds = %header__12
+  %173 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 %171)
+  %174 = bitcast i8* %173 to %Callable**
+  %175 = load %Callable*, %Callable** %174, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %175, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %175, i32 -1)
+  br label %exiting__12
+
+exiting__12: ; preds = %body__12
+  %176 = add i64 %171, 1
+  br label %header__12
+
+exit__12: ; preds = %header__12
+  call void @__quantum__rt__array_update_alias_count(%Array* %69, i32 -1)
+  %177 = sub i64 %169, 1
+  br label %header__13
+
+header__13: ; preds = %exiting__13, %exit__12
+  %178 = phi i64 [ 0, %exit__12 ], [ %183, %exiting__13 ]
+  %179 = icmp sle i64 %178, %177
+  br i1 %179, label %body__13, label %exit__13
+
+body__13: ; preds = %header__13
+  %180 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 %178)
+  %181 = bitcast i8* %180 to %Callable**
+  %182 = load %Callable*, %Callable** %181, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %182, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %182, i32 -1)
+  br label %exiting__13
+
+exiting__13: ; preds = %body__13
+  %183 = add i64 %178, 1
+  br label %header__13
+
+exit__13: ; preds = %header__13
+  call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 -1)
+  store %Array* %81, %Array** %plan, align 8
+  br label %header__14
+
+header__14: ; preds = %exiting__14, %exit__13
+  %184 = phi i64 [ 0, %exit__13 ], [ %189, %exiting__14 ]
+  %185 = icmp sle i64 %184, 0
+  br i1 %185, label %body__14, label %exit__14
+
+body__14: ; preds = %header__14
+  %186 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 %184)
+  %187 = bitcast i8* %186 to %Callable**
+  %188 = load %Callable*, %Callable** %187, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %188, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %188, i32 -1)
+  br label %exiting__14
+
+exiting__14: ; preds = %body__14
+  %189 = add i64 %184, 1
+  br label %header__14
+
+exit__14: ; preds = %header__14
+  call void @__quantum__rt__array_update_reference_count(%Array* %78, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 -1)
+  br label %continue__4
+
+header__15: ; preds = %exiting__15, %then1__1
+  %190 = phi i64 [ 0, %then1__1 ], [ %195, %exiting__15 ]
+  %191 = icmp sle i64 %190, %107
+  br i1 %191, label %body__15, label %exit__15
+
+body__15: ; preds = %header__15
+  %192 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 %190)
+  %193 = bitcast i8* %192 to %Callable**
+  %194 = load %Callable*, %Callable** %193, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %194, i32 1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %194, i32 1)
+  br label %exiting__15
+
+exiting__15: ; preds = %body__15
+  %195 = add i64 %190, 1
+  br label %header__15
+
+exit__15: ; preds = %header__15
+  call void @__quantum__rt__array_update_reference_count(%Array* %105, i32 1)
+  %196 = sub i64 %106, 1
+  br label %header__16
+
+header__16: ; preds = %exiting__16, %exit__15
+  %197 = phi i64 [ 0, %exit__15 ], [ %202, %exiting__16 ]
+  %198 = icmp sle i64 %197, %196
+  br i1 %198, label %body__16, label %exit__16
+
+body__16: ; preds = %header__16
+  %199 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 %197)
+  %200 = bitcast i8* %199 to %Callable**
+  %201 = load %Callable*, %Callable** %200, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %201, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %201, i32 1)
+  br label %exiting__16
+
+exiting__16: ; preds = %body__16
+  %202 = add i64 %197, 1
+  br label %header__16
+
+exit__16: ; preds = %header__16
+  call void @__quantum__rt__array_update_alias_count(%Array* %105, i32 1)
+  %203 = call i64 @__quantum__rt__array_get_size_1d(%Array* %99)
+  %204 = sub i64 %203, 1
+  br label %header__17
+
+header__17: ; preds = %exiting__17, %exit__16
+  %205 = phi i64 [ 0, %exit__16 ], [ %210, %exiting__17 ]
+  %206 = icmp sle i64 %205, %204
+  br i1 %206, label %body__17, label %exit__17
+
+body__17: ; preds = %header__17
+  %207 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %99, i64 %205)
+  %208 = bitcast i8* %207 to %Callable**
+  %209 = load %Callable*, %Callable** %208, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %209, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %209, i32 -1)
+  br label %exiting__17
+
+exiting__17: ; preds = %body__17
+  %210 = add i64 %205, 1
+  br label %header__17
+
+exit__17: ; preds = %header__17
+  call void @__quantum__rt__array_update_alias_count(%Array* %99, i32 -1)
+  %211 = sub i64 %203, 1
+  br label %header__18
+
+header__18: ; preds = %exiting__18, %exit__17
+  %212 = phi i64 [ 0, %exit__17 ], [ %217, %exiting__18 ]
+  %213 = icmp sle i64 %212, %211
+  br i1 %213, label %body__18, label %exit__18
+
+body__18: ; preds = %header__18
+  %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %99, i64 %212)
+  %215 = bitcast i8* %214 to %Callable**
+  %216 = load %Callable*, %Callable** %215, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %216, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %216, i32 -1)
+  br label %exiting__18
+
+exiting__18: ; preds = %body__18
+  %217 = add i64 %212, 1
+  br label %header__18
+
+exit__18: ; preds = %header__18
+  call void @__quantum__rt__array_update_reference_count(%Array* %99, i32 -1)
+  store %Array* %105, %Array** %plan, align 8
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %100, i32 -1)
+  %218 = call i64 @__quantum__rt__array_get_size_1d(%Array* %104)
+  %219 = sub i64 %218, 1
+  br label %header__19
+
+header__19: ; preds = %exiting__19, %exit__18
+  %220 = phi i64 [ 0, %exit__18 ], [ %225, %exiting__19 ]
+  %221 = icmp sle i64 %220, %219
+  br i1 %221, label %body__19, label %exit__19
+
+body__19: ; preds = %header__19
+  %222 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %104, i64 %220)
+  %223 = bitcast i8* %222 to %Callable**
+  %224 = load %Callable*, %Callable** %223, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %224, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %224, i32 -1)
+  br label %exiting__19
+
+exiting__19: ; preds = %body__19
+  %225 = add i64 %220, 1
+  br label %header__19
+
+exit__19: ; preds = %header__19
+  call void @__quantum__rt__array_update_reference_count(%Array* %104, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %105, i32 -1)
+  br label %continue__3
+
+header__20: ; preds = %exiting__20, %continue__3
+  %226 = phi i64 [ 0, %continue__3 ], [ %232, %exiting__20 ]
+  %227 = icmp sle i64 %226, %109
+  br i1 %227, label %body__20, label %exit__20
+
+body__20: ; preds = %header__20
+  %228 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %226)
+  %229 = bitcast i8* %228 to { double, double }**
+  %230 = load { double, double }*, { double, double }** %229, align 8
+  %231 = bitcast { double, double }* %230 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %231, i32 -1)
+  br label %exiting__20
+
+exiting__20: ; preds = %body__20
+  %232 = add i64 %226, 1
+  br label %header__20
+
+exit__20: ; preds = %header__20
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  %233 = call i64 @__quantum__rt__array_get_size_1d(%Array* %108)
+  %234 = sub i64 %233, 1
+  br label %header__21
+
+header__21: ; preds = %exiting__21, %exit__20
+  %235 = phi i64 [ 0, %exit__20 ], [ %240, %exiting__21 ]
+  %236 = icmp sle i64 %235, %234
+  br i1 %236, label %body__21, label %exit__21
+
+body__21: ; preds = %header__21
+  %237 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %108, i64 %235)
+  %238 = bitcast i8* %237 to %Callable**
+  %239 = load %Callable*, %Callable** %238, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %239, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %239, i32 -1)
+  br label %exiting__21
+
+exiting__21: ; preds = %body__21
+  %240 = add i64 %235, 1
+  br label %header__21
+
+exit__21: ; preds = %header__21
+  call void @__quantum__rt__array_update_alias_count(%Array* %108, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingY, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingZ, i32 -1)
+  %241 = sub i64 %17, 1
+  br label %header__22
+
+header__22: ; preds = %exiting__22, %exit__21
+  %242 = phi i64 [ 0, %exit__21 ], [ %248, %exiting__22 ]
+  %243 = icmp sle i64 %242, %241
+  br i1 %243, label %body__22, label %exit__22
+
+body__22: ; preds = %header__22
+  %244 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 %242)
+  %245 = bitcast i8* %244 to { double, double }**
+  %246 = load { double, double }*, { double, double }** %245, align 8
+  %247 = bitcast { double, double }* %246 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %247, i32 -1)
+  br label %exiting__22
+
+exiting__22: ; preds = %body__22
+  %248 = add i64 %242, 1
+  br label %header__22
+
+exit__22: ; preds = %header__22
+  call void @__quantum__rt__array_update_alias_count(%Array* %newCoefficients, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingY, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingZ, i32 -1)
+  %249 = sub i64 %17, 1
+  br label %header__23
+
+header__23: ; preds = %exiting__23, %exit__22
+  %250 = phi i64 [ 0, %exit__22 ], [ %256, %exiting__23 ]
+  %251 = icmp sle i64 %250, %249
+  br i1 %251, label %body__23, label %exit__23
+
+body__23: ; preds = %header__23
+  %252 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 %250)
+  %253 = bitcast i8* %252 to { double, double }**
+  %254 = load { double, double }*, { double, double }** %253, align 8
+  %255 = bitcast { double, double }* %254 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %255, i32 -1)
+  br label %exiting__23
+
+exiting__23: ; preds = %body__23
+  %256 = add i64 %250, 1
+  br label %header__23
+
+exit__23: ; preds = %header__23
+  call void @__quantum__rt__array_update_reference_count(%Array* %newCoefficients, i32 -1)
+  %257 = bitcast { %Array*, %Array*, %Array* }* %13 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %257, i32 -1)
+  ret %Array* %108
+}
+
+define internal { %Array*, %Array*, %Array* }* @Microsoft__Quantum__Preparation____QsRef2__StatePreparationSBMComputeCoefficients____body(%Array* %coefficients) {
+entry:
+  %newCoefficients = alloca %Array*, align 8
+  %disentanglingY = alloca %Array*, align 8
+  %disentanglingZ = alloca %Array*, align 8
+  %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients)
+  %1 = sub i64 %0, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ]
+  %3 = icmp sle i64 %2, %1
+  br i1 %3, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2)
+  %5 = bitcast i8* %4 to { double, double }**
+  %6 = load { double, double }*, { double, double }** %5, align 8
+  %7 = bitcast { double, double }* %6 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %8 = add i64 %2, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+  %9 = sdiv i64 %0, 2
+  %10 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %9)
+  %11 = sub i64 %9, 1
+  br label %header__2
+
+header__2: ; preds = %exiting__2, %exit__1
+  %12 = phi i64 [ 0, %exit__1 ], [ %16, %exiting__2 ]
+  %13 = icmp sle i64 %12, %11
+  br i1 %13, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+  %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %12)
+  %15 = bitcast i8* %14 to double*
+  store double 0.000000e+00, double* %15, align 8
+  br label %exiting__2
+
+exiting__2: ; preds = %body__2
+  %16 = add i64 %12, 1
+  br label %header__2
+
+exit__2: ; preds = %header__2
+  store %Array* %10, %Array** %disentanglingZ, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1)
+  %17 = sdiv i64 %0, 2
+  %18 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %17)
+  %19 = sub i64 %17, 1
+  br label %header__3
+
+header__3: ; preds = %exiting__3, %exit__2
+  %20 = phi i64 [ 0, %exit__2 ], [ %24, %exiting__3 ]
+  %21 = icmp sle i64 %20, %19
+  br i1 %21, label %body__3, label %exit__3
+
+body__3: ; preds = %header__3
+  %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %20)
+  %23 = bitcast i8* %22 to double*
+  store double 0.000000e+00, double* %23, align 8
+  br label %exiting__3
+
+exiting__3: ; preds = %body__3
+  %24 = add i64 %20, 1
+  br label %header__3
+
+exit__3: ; preds = %header__3
+  store %Array* %18, %Array** %disentanglingY, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1)
+  %25 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double 0.000000e+00, double 0.000000e+00)
+  %26 = sdiv i64 %0, 2
+  %27 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %26)
+  %28 = sub i64 %26, 1
+  br label %header__4
+
+header__4: ; preds = %exiting__4, %exit__3
+  %29 = phi i64 [ 0, %exit__3 ], [ %34, %exiting__4 ]
+  %30 = icmp sle i64 %29, %28
+  br i1 %30, label %body__4, label %exit__4
+
+body__4: ; preds = %header__4
+  %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %27, i64 %29)
+  %32 = bitcast i8* %31 to { double, double }**
+  store { double, double }* %25, { double, double }** %32, align 8
+  %33 = bitcast { double, double }* %25 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 1)
+  br label %exiting__4
+
+exiting__4: ; preds = %body__4
+  %34 = add i64 %29, 1
+  br label %header__4
+
+exit__4: ; preds = %header__4
+  store %Array* %27, %Array** %newCoefficients, align 8
+  %35 = sub i64 %26, 1
+  br label %header__5
+
+header__5: ; preds = %exiting__5, %exit__4
+  %36 = phi i64 [ 0, %exit__4 ], [ %42, %exiting__5 ]
+  %37 = icmp sle i64 %36, %35
+  br i1 %37, label %body__5, label %exit__5
+
+body__5: ; preds = %header__5
+  %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %27, i64 %36)
+  %39 = bitcast i8* %38 to { double, double }**
+  %40 = load { double, double }*, { double, double }** %39, align 8
+  %41 = bitcast { double, double }* %40 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %41, i32 1)
+  br label %exiting__5
+
+exiting__5: ; preds = %body__5
+  %42 = add i64 %36, 1
+  br label %header__5
+
+exit__5: ; preds = %header__5
+  call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 1)
+  %43 = sub i64 %0, 1
+  br label %preheader__1
+
+preheader__1: ; preds = %exit__5
+  br label %header__6
+
+header__6: ; preds = %exiting__6, %preheader__1
+  %idxCoeff = phi i64 [ 0, %preheader__1 ], [ %80, %exiting__6 ]
+  %44 = icmp sle i64 %idxCoeff, %43
+  %45 = icmp sge i64 %idxCoeff, %43
+  %46 = select i1 true, i1 %44, i1 %45
+  br i1 %46, label %body__6, label %exit__6
+
+body__6: ; preds = %header__6
+  %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %idxCoeff)
+  %48 = bitcast i8* %47 to { double, double }**
+  %49 = load { double, double }*, { double, double }** %48, align 8
+  %50 = add i64 %idxCoeff, 1
+  %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %50)
+  %52 = bitcast i8* %51 to { double, double }**
+  %53 = load { double, double }*, { double, double }** %52, align 8
+  %54 = call { { double, double }*, double, double }* @Microsoft__Quantum__Preparation__BlochSphereCoordinates__body({ double, double }* %49, { double, double }* %53)
+  %55 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %54, i32 0, i32 0
+  %rt = load { double, double }*, { double, double }** %55, align 8
+  %56 = bitcast { double, double }* %rt to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 1)
+  %57 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %54, i32 0, i32 1
+  %phi = load double, double* %57, align 8
+  %58 = getelementptr inbounds { {
double, double }*, double, double }, { { double, double }*, double, double }* %54, i32 0, i32 2 + %theta = load double, double* %58, align 8 + %59 = load %Array*, %Array** %disentanglingZ, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %59, i32 -1) + %60 = call %Array* @__quantum__rt__array_copy(%Array* %59, i1 false) + %61 = fmul double 5.000000e-01, %phi + %62 = sdiv i64 %idxCoeff, 2 + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 %62) + %64 = bitcast i8* %63 to double* + store double %61, double* %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %60, i32 1) + store %Array* %60, %Array** %disentanglingZ, align 8 + %65 = load %Array*, %Array** %disentanglingY, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 -1) + %66 = call %Array* @__quantum__rt__array_copy(%Array* %65, i1 false) + %67 = fmul double 5.000000e-01, %theta + %68 = sdiv i64 %idxCoeff, 2 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %66, i64 %68) + %70 = bitcast i8* %69 to double* + %71 = load double, double* %70, align 8 + store double %67, double* %70, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 1) + store %Array* %66, %Array** %disentanglingY, align 8 + %72 = load %Array*, %Array** %newCoefficients, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 -1) + %73 = call %Array* @__quantum__rt__array_copy(%Array* %72, i1 false) + %74 = sdiv i64 %idxCoeff, 2 + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 %74) + %76 = bitcast i8* %75 to { double, double }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 1) + %77 = load { double, double }*, { double, double }** %76, align 8 + %78 = bitcast { double, double }* %77 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %78, i32 -1) + store { double, double }* %rt, { double, double }** %76, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %73, i32 1) + store %Array* %73, %Array** %newCoefficients, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 -1) + %79 = bitcast { { double, double }*, double, double }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %59, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %72, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %80 = add i64 %idxCoeff, 2 + br label %header__6 + +exit__6: ; preds = %header__6 + %81 = load %Array*, %Array** %disentanglingY, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 1) + %82 = load %Array*, %Array** %disentanglingZ, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %82, i32 1) + %83 = load %Array*, %Array** %newCoefficients, align 8 + %84 = call i64 @__quantum__rt__array_get_size_1d(%Array* %83) + %85 = sub i64 %84, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %86 = phi i64 [ 0, %exit__6 ], [ %92, %exiting__7 ] + %87 = icmp sle i64 %86, %85 + br i1 %87, label %body__7, label %exit__7 + 
+body__7: ; preds = %header__7 + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 %86) + %89 = bitcast i8* %88 to { double, double }** + %90 = load { double, double }*, { double, double }** %89, align 8 + %91 = bitcast { double, double }* %90 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %91, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %92 = add i64 %86, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %83, i32 1) + %93 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Array* }* getelementptr ({ %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* null, i32 1) to i64)) + %94 = bitcast %Tuple* %93 to { %Array*, %Array*, %Array* }* + %95 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %94, i32 0, i32 0 + %96 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %94, i32 0, i32 1 + %97 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %94, i32 0, i32 2 + store %Array* %81, %Array** %95, align 8 + store %Array* %82, %Array** %96, align 8 + store %Array* %83, %Array** %97, align 8 + %98 = sub i64 %0, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %99 = phi i64 [ 0, %exit__7 ], [ %105, %exiting__8 ] + %100 = icmp sle i64 %99, %98 + br i1 %100, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %99) + %102 = bitcast i8* %101 to { double, double }** + %103 = load { double, double }*, { double, double }** %102, align 8 + %104 = bitcast { double, double }* %103 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %104, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %105 = add i64 %99, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %82, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 -1) + %106 = sub i64 %84, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %107 = phi i64 [ 0, %exit__8 ], [ %113, %exiting__9 ] + %108 = icmp sle i64 %107, %106 + br i1 %108, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 %107) + %110 = bitcast i8* %109 to { double, double }** + %111 = load { double, double }*, { double, double }** %110, align 8 + %112 = bitcast { double, double }* %111 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %112, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %113 = add i64 %107, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %83, i32 -1) + %114 = bitcast { double, double }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %114, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %82, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 -1) + %115 = sub i64 %84, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %116 = phi i64 [ 0, %exit__9 ], [ %122, %exiting__10 ] + %117 = icmp sle i64 %116, %115 + br i1 %117, label %body__10, label 
%exit__10 + +body__10: ; preds = %header__10 + %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 %116) + %119 = bitcast i8* %118 to { double, double }** + %120 = load { double, double }*, { double, double }** %119, align 8 + %121 = bitcast { double, double }* %120 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %121, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %122 = add i64 %116, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_reference_count(%Array* %83, i32 -1) + ret { %Array*, %Array*, %Array* }* %94 +} + +define internal void @Lifted__PartialApplication__28__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %8 = load %Range, %Range* %7, align 4 + %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5 + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Range, i64 }* + %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1 + store %Range %8, %Range* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = bitcast %Tuple* %arg-tuple to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4 + store double %2, double* %20, 
align 8 + store %Array* %4, %Array** %21, align 8 + store i2 %6, i2* %22, align 1 + store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8 + store %Array* %17, %Array** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__28__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %8 = load %Range, %Range* %7, align 4 + %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5 + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Range, i64 }* + %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1 + store %Range %8, %Range* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = bitcast %Tuple* %arg-tuple to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4 + store double %2, double* 
%20, align 8 + store %Array* %4, %Array** %21, align 8 + store i2 %6, i2* %22, align 1 + store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8 + store %Array* %17, %Array** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__28__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4 + %13 = load %Range, %Range* %12, align 4 + %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5 + %15 = load i64, i64* %14, align 4 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Range, i64 }* + %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1 + store %Range %13, %Range* %18, align 4 + store i64 %15, i64* %19, align 4 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, 
%Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3 + %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4 + store double %7, double* %22, align 8 + store %Array* %9, %Array** %23, align 8 + store i2 %11, i2* %24, align 1 + store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8 + store %Array* %4, %Array** %26, align 8 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1 + store %Array* %3, %Array** %29, align 8 + store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0 + %32 = load %Callable*, %Callable** %31, align 8 + %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %33) + call void @__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__28__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2 + %9 = load %Array*, 
%Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4 + %13 = load %Range, %Range* %12, align 4 + %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5 + %15 = load i64, i64* %14, align 4 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Range, i64 }* + %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1 + store %Range %13, %Range* %18, align 4 + store i64 %15, i64* %19, align 4 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3 + %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4 + store double %7, double* %22, align 8 + store %Array* %9, %Array** %23, align 8 + store i2 %11, i2* %24, align 1 + store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8 + store %Array* %4, %Array** %26, align 8 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1 + store %Array* %3, %Array** %29, align 8 + store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0 + %32 = load %Callable*, %Callable** %31, align 8 + %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false) + 
call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %33) + call void @__quantum__rt__callable_make_controlled(%Callable* %33) + call void @__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load %Array*, %Array** %2, align 8 + %8 = load i2, i2* %3, align 1 + %9 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8 + %10 = load %Array*, %Array** %5, align 8 + call void @Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____body(double %6, %Array* %7, i2 %8, { %Range, i64 }* %9, %Array* %10) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load %Array*, %Array** %2, align 8 + %8 = load i2, i2* %3, align 1 + %9 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8 + %10 = load %Array*, %Array** %5, align 8 + call void @Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____adj(double %6, %Array* %7, i2 %8, { %Range, i64 }* %9, %Array* %10) + ret void +} + +define internal void 
@Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, i2, { %Range, i64 }*, %Array* }*, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____ctl(%Array* %3, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, i2, { %Range, i64 }*, %Array* }*, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef2__ApplyMultiplexStep____ctladj(%Array* %3, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__17__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %6 = load %Range, %Range* %5, align 4 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__17__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %6 = load %Range, %Range* %5, align 4 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__29__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %8 = load %Range, %Range* %7, align 4 + %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5 + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Range, i64 }* + %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1 + store %Range %8, %Range* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = bitcast %Tuple* %arg-tuple to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { 
double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4 + store double %2, double* %20, align 8 + store %Array* %4, %Array** %21, align 8 + store i2 %6, i2* %22, align 1 + store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8 + store %Array* %17, %Array** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__29__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %8 = load %Range, %Range* %7, align 4 + %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5 + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Range, i64 }* + %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1 + store %Range %8, %Range* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = bitcast %Tuple* %arg-tuple to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* 
}, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4 + store double %2, double* %20, align 8 + store %Array* %4, %Array** %21, align 8 + store i2 %6, i2* %22, align 1 + store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8 + store %Array* %17, %Array** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__29__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4 + %13 = load %Range, %Range* %12, align 4 + %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5 + %15 = load i64, i64* %14, align 4 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Range, i64 }* + %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1 + store %Range %13, %Range* %18, align 4 + store i64 %15, i64* %19, align 4 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, 
%Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3 + %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4 + store double %7, double* %22, align 8 + store %Array* %9, %Array** %23, align 8 + store i2 %11, i2* %24, align 1 + store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8 + store %Array* %4, %Array** %26, align 8 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1 + store %Array* %3, %Array** %29, align 8 + store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0 + %32 = load %Callable*, %Callable** %31, align 8 + %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %33) + call void @__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__29__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, 
%Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4 + %13 = load %Range, %Range* %12, align 4 + %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5 + %15 = load i64, i64* %14, align 4 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Range, i64 }* + %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1 + store %Range %13, %Range* %18, align 4 + store i64 %15, i64* %19, align 4 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3 + %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4 + store double %7, double* %22, align 8 + store %Array* %9, %Array** %23, align 8 + store i2 %11, i2* %24, align 1 + store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8 + store %Array* %4, %Array** %26, align 8 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1 + store %Array* %3, %Array** %29, align 8 + store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0 + %32 = load %Callable*, 
%Callable** %31, align 8 + %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %33) + call void @__quantum__rt__callable_make_controlled(%Callable* %33) + call void @__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__30__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, i64, %Array* }* + %10 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 2 + store double %2, double* %10, align 8 + store i64 %4, i64* %11, align 4 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__30__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, i64, %Array* }* + %10 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, i64, %Array* }, { 
double, i64, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 2 + store double %2, double* %10, align 8 + store i64 %4, i64* %11, align 4 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__30__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %6 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, i64, %Array* }* + %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 2 + store double %7, double* %12, align 8 + store i64 %9, i64* %13, align 4 + store %Array* %4, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, i64, %Array* }* }* getelementptr ({ %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { double, i64, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { double, i64, %Array* }* %11, { double, i64, %Array* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void 
@__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__30__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %6 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, i64, %Array* }* + %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 2 + store double %7, double* %12, align 8 + store i64 %9, i64* %13, align 4 + store %Array* %4, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, i64, %Array* }* }* getelementptr ({ %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { double, i64, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { double, i64, %Array* }* %11, { double, i64, %Array* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Preparation____QsRef2__ApplyGlobalRotationStep____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64, %Array* }* + %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2 + %4 = load double, double* %1, align 8 + %5 = load i64, i64* %2, align 4 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Preparation____QsRef2__ApplyGlobalRotationStep____body(double %4, i64 %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyGlobalRotationStep____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64, %Array* }* + %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2 + %4 = load double, double* %1, align 8 + %5 = load i64, i64* %2, align 4 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Preparation____QsRef2__ApplyGlobalRotationStep____adj(double %4, i64 %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyGlobalRotationStep____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, i64, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, i64, %Array* }*, { double, i64, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef2__ApplyGlobalRotationStep____ctl(%Array* %3, { double, i64, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyGlobalRotationStep____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, i64, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, i64, %Array* }*, { double, i64, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef2__ApplyGlobalRotationStep____ctladj(%Array* %3, { double, i64, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__18__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + 
call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__18__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal { { double, double }*, double, double }* @Microsoft__Quantum__Preparation__BlochSphereCoordinates__body({ double, double }* %a0, { double, double }* %a1) { +entry: + %0 = bitcast { double, double }* %a0 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = bitcast { double, double }* %a1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %abs0 = call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %a0) + %abs1 = call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %a1) + %arg0 = call double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* %a0) + %arg1 = call double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* %a1) + %2 = fmul double %abs0, %abs0 + %3 = fmul double %abs1, %abs1 + %d = fadd double %2, %3 + %r = call double @__quantum__qis__sqrt__body(double %d) + %4 = fadd double %arg0, %arg1 + %t = fmul double 5.000000e-01, %4 + %phi = fsub double %arg1, %arg0 + %5 = call double @__quantum__qis__arctan2__body(double %abs1, double %abs0) + %theta = fmul double 2.000000e+00, %5 + %6 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %r, double %t) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }*, double, double }* getelementptr ({ { double, double }*, double, double }, { { double, double }*, double, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { { double, double }*, double, double }* + %9 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %8, i32 0, i32 2 + store { double, double }* %6, { double, double }** %9, align 8 + store double %phi, double* %10, align 8 + store double %theta, double* %11, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + ret { { double, double }*, double, double }* %8 +} + +define internal %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %nQubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, 
%1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = trunc i64 %nQubits to i32 + %10 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %9) + %11 = fptosi double %10 to i64 + %12 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double 0.000000e+00, double 0.000000e+00) + %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___0c8093b847cf4d60bc10980fe3db4834_Padded__body(i64 %11, { double, double }* %12, %Array* %coefficients) + %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded) + %14 = sub i64 %13, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 %15) + %18 = bitcast i8* %17 to { double, double }** + %19 = load { double, double }*, { double, double }** %18, align 8 + %20 = bitcast { double, double }* %19 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1) + %22 = icmp sgt i64 %nQubits, 1 + %23 = sub i64 %nQubits, 1 + %24 = insertvalue %Range { i64 1, i64 1, i64 0 }, i64 %23, 2 + %rngControl = select i1 %22, %Range %24, %Range { i64 1, i64 1, i64 0 } + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { %Range, i64 }* + %27 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %26, i32 0, i32 0 + %28 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %26, i32 0, i32 1 + store %Range %rngControl, %Range* %27, align 4 + store i64 0, i64* %28, align 4 + %plan = call %Array* @Microsoft__Quantum__Preparation____QsRef2__ApproximatelyUnprepareArbitraryStatePlan____body(double %tolerance, %Array* %coefficientsPadded, { %Range, i64 }* %26) + %29 = call i64 @__quantum__rt__array_get_size_1d(%Array* %plan) + %30 = sub i64 %29, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %31 = phi i64 [ 0, %exit__2 ], [ %36, %exiting__3 ] + %32 = icmp sle i64 %31, %30 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %plan, i64 %31) + %34 = bitcast i8* %33 to %Callable** + %35 = load %Callable*, %Callable** %34, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %35, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %35, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %36 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call 
void @__quantum__rt__array_update_alias_count(%Array* %plan, i32 1) + %unprepare = call %Callable* @Microsoft__Quantum__Canon___1fe14bbd24584359ab40c526d5861af6_BoundCA__body(%Array* %plan) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unprepare, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unprepare, i32 1) + %37 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef2__ApplyToLittleEndian____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %38 = call %Callable* @__quantum__rt__callable_copy(%Callable* %unprepare, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %38, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %38) + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable* }, { %Callable*, %Callable* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Callable*, %Callable* }* + %41 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %40, i32 0, i32 0 + %42 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %40, i32 0, i32 1 + store %Callable* %37, %Callable** %41, align 8 + store %Callable* %38, %Callable** %42, align 8 + %43 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__31__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__19__FunctionTable, %Tuple* %39) + %44 = sub i64 %0, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %45 = phi i64 [ 0, %exit__3 ], [ %51, %exiting__4 ] + %46 = icmp sle i64 %45, %44 + br i1 %46, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %45) + %48 = bitcast i8* %47 to { double, double }** + %49 = load { double, double }*, { double, double }** %48, align 8 + %50 = bitcast { double, double }* %49 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %50, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %51 = add i64 %45, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + %52 = sub i64 %13, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %53 = phi i64 [ 0, %exit__4 ], [ %59, %exiting__5 ] + %54 = icmp sle i64 %53, %52 + br i1 %54, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 %53) + %56 = bitcast i8* %55 to { double, double }** + %57 = load { double, double }*, { double, double }** %56, align 8 + %58 = bitcast { double, double }* %57 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %58, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %59 = add i64 %53, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1) + %60 = sub i64 %29, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %61 = phi i64 [ 0, %exit__5 ], [ %66, %exiting__6 ] + %62 = icmp sle i64 %61, %60 + br i1 %62, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %plan, i64 %61) + %64 = 
bitcast i8* %63 to %Callable** + %65 = load %Callable*, %Callable** %64, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %65, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %65, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %66 = add i64 %61, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %plan, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unprepare, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unprepare, i32 -1) + %67 = bitcast { double, double }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %67, i32 -1) + %68 = sub i64 %13, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %69 = phi i64 [ 0, %exit__6 ], [ %75, %exiting__7 ] + %70 = icmp sle i64 %69, %68 + br i1 %70, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %71 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 %69) + %72 = bitcast i8* %71 to { double, double }** + %73 = load { double, double }*, { double, double }** %72, align 8 + %74 = bitcast { double, double }* %73 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %74, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %75 = add i64 %69, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + %76 = sub i64 %29, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %77 = phi i64 [ 0, %exit__7 ], [ %82, %exiting__8 ] + %78 = icmp sle i64 %77, %76 + br i1 %78, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %plan, i64 %77) + %80 = bitcast i8* %79 to %Callable** + %81 = load %Callable*, %Callable** %80, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %81, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %81, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %82 = add i64 %77, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %plan, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %unprepare, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %unprepare, i32 -1) + ret %Callable* %43 +} + +define internal void @Lifted__PartialApplication__31__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Callable*, { %Array* }* }* + %5 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 1 + store %Callable* %2, %Callable** %5, align 8 + %7 = 
bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__31__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Callable*, { %Array* }* }* + %5 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 1 + store %Callable* %2, %Callable** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Callable* @__quantum__rt__callable_copy(%Callable* %9, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %10) + call void @__quantum__rt__callable_invoke(%Callable* %10, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__31__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, { %Array* }* }* + %10 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store { %Array* }* %4, { %Array* }** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, { %Array* }* }* }* getelementptr 
({ %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Callable*, { %Array* }* }* }* + %14 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Callable*, { %Array* }* }* %9, { %Callable*, { %Array* }* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__31__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, { %Array* }* }* + %10 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store { %Array* }* %4, { %Array* }** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, { %Array* }* }* }* getelementptr ({ %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Callable*, { %Array* }* }* }* + %14 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Callable*, { %Array* }* }* %9, { %Callable*, { %Array* }* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** 
%16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyToLittleEndian____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Callable*, %Callable** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef2__ApplyToLittleEndian____body(%Callable* %3, { %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyToLittleEndian____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Callable*, %Callable** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef2__ApplyToLittleEndian____adj(%Callable* %3, { %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyToLittleEndian____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, { %Array* }* }* }* + %1 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Callable*, { %Array* }* }*, { %Callable*, { %Array* }* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef2__ApplyToLittleEndian____ctl(%Array* %3, { %Callable*, { %Array* }* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef2__ApplyToLittleEndian____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, { %Array* }* }* }* + %1 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Callable*, { %Array* }* }*, { 
%Callable*, { %Array* }* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef2__ApplyToLittleEndian____ctladj(%Array* %3, { %Callable*, { %Array* }* }* %4) + ret void +} + +define internal void @MemoryManagement__19__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__19__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__body(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = call i64 @__quantum__rt__array_get_size_1d(%Array* %10) + %13 = call %Callable* 
@Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %12) + call void @__quantum__rt__callable_invoke(%Callable* %13, %Tuple* %11, %Tuple* null) + %14 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %15) + %18 = bitcast i8* %17 to { double, double }** + %19 = load { double, double }*, { double, double }** %18, align 8 + %20 = bitcast { double, double }* %19 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__adj(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = call i64 @__quantum__rt__array_get_size_1d(%Array* %10) + %13 = call %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %12) + %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %13, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %14) + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %11, %Tuple* null) + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%coefficients, i64 %16) + %19 = bitcast i8* %18 to { double, double }** + %20 = load { double, double }*, { double, double }** %19, align 8 + %21 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %5) + %8 = bitcast i8* %7 to { double, double }** + %9 = load { double, double }*, { double, double }** %8, align 8 + %10 = bitcast { double, double }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %12 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %12, align 8 + %13 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %14 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + %15 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %14) + %17 = call %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %16) + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 1) + 
call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 1) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { %Array*, { %Array* }* }* + %21 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %20, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %21, align 8 + store { %Array* }* %qubits, { %Array* }** %22, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %19, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %23 = sub i64 %3, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %24 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %25 = icmp sle i64 %24, %23 + br i1 %25, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %24) + %27 = bitcast i8* %26 to { double, double }** + %28 = load { double, double }*, { double, double }** %27, align 8 + %29 = bitcast { double, double }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %24, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %5) + %8 = bitcast i8* %7 to { double, double }** + %9 = load { double, double }*, { double, double }** %8, align 8 + 
%10 = bitcast { double, double }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %12 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %12, align 8 + %13 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %14 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + %15 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %14) + %17 = call %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %16) + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 1) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { %Array*, { %Array* }* }* + %21 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %20, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %21, align 8 + store { %Array* }* %qubits, { %Array* }** %22, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %19, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %23 = sub i64 %3, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %24 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %25 = icmp sle i64 %24, %23 + br i1 %25, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %24) + %27 = bitcast i8* %26 to { double, double }** + %28 = load { double, double }*, { double, double }** %27, align 8 + %29 = bitcast { double, double }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %24, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__body(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__ComplexPolar__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + store %Callable* %3, %Callable** %6, align 8 + store double 0.000000e+00, double* %7, align 8 + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__32__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__20__FunctionTable, %Tuple* %4) + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__AbsD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %10 = call %Callable* @Microsoft__Quantum__Canon___ac50acd82a7e42128d811608cb927809_Compose__body(%Callable* %8, %Callable* %9) + %coefficientsAsComplexPolar = call %Array* @Microsoft__Quantum__Arrays___31a44c6111824f8e8c14091708980cfb_Mapped__body(%Callable* %10, %Array* %coefficients) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsAsComplexPolar) + %12 = sub i64 %11, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %13 = phi i64 [ 0, %entry ], [ %19, %exiting__1 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %13, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsAsComplexPolar, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__body(double %tolerance, %Array* %coefficientsAsComplexPolar, { %Array* }* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + 
call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + %20 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %21) + %24 = bitcast i8* %23 to { double, double }** + %25 = load { double, double }*, { double, double }** %24, align 8 + %26 = bitcast { double, double }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsAsComplexPolar, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + %28 = sub i64 %11, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsAsComplexPolar, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__32__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 1 + %5 = load double, double* %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, double }* + %8 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store double %5, double* %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Math__ComplexPolar__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, double }* + %1 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load double, double* %2, align 8 + %5 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %3, double %4) + %6 = bitcast %Tuple* %result-tuple to { { double, double }* }* + %7 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %6, i32 0, i32 0 + store { double, double }* %5, { double, double }** %7, align 8 + ret void +} + +define internal void @MemoryManagement__20__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__20__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Math__AbsD__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = call double @Microsoft__Quantum__Math__AbsD__body(double %2) + %4 = bitcast %Tuple* %result-tuple to { double }* + %5 = getelementptr inbounds { double }, { double }* %4, i32 0, i32 0 + store double %3, double* %5, align 8 + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__adj(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__ComplexPolar__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double 
}, { %Callable*, double }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + store %Callable* %3, %Callable** %6, align 8 + store double 0.000000e+00, double* %7, align 8 + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__33__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__20__FunctionTable, %Tuple* %4) + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__AbsD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %10 = call %Callable* @Microsoft__Quantum__Canon___ac50acd82a7e42128d811608cb927809_Compose__body(%Callable* %8, %Callable* %9) + %__qsVar0__coefficientsAsComplexPolar__ = call %Array* @Microsoft__Quantum__Arrays___31a44c6111824f8e8c14091708980cfb_Mapped__body(%Callable* %10, %Array* %coefficients) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__) + %12 = sub i64 %11, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %13 = phi i64 [ 0, %entry ], [ %19, %exiting__1 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %13, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__adj(double %tolerance, %Array* %__qsVar0__coefficientsAsComplexPolar__, { %Array* }* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + %20 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %21) + %24 = bitcast i8* %23 to { double, double }** + %25 = load { double, double }*, { double, double }** %24, align 8 + %26 = bitcast { double, double }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + 
call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + %28 = sub i64 %11, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__33__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 1 + %5 = load double, double* %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, double }* + %8 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store double %5, double* %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array* }* %qubits to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__ComplexPolar__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, double }* + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store double 0.000000e+00, double* %11, align 8 + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__34__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__20__FunctionTable, %Tuple* %8) + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__AbsD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %14 = call %Callable* @Microsoft__Quantum__Canon___ac50acd82a7e42128d811608cb927809_Compose__body(%Callable* %12, %Callable* %13) + %coefficientsAsComplexPolar = call %Array* @Microsoft__Quantum__Arrays___31a44c6111824f8e8c14091708980cfb_Mapped__body(%Callable* %14, %Array* %coefficients) + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsAsComplexPolar) + %16 = sub i64 %15, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %17 = phi i64 [ 0, %entry ], [ %23, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %17) + %20 = bitcast i8* %19 to { double, double }** + %21 = load { double, double }*, { double, double }** %20, align 8 + %22 = bitcast { double, double }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %23 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsAsComplexPolar, i32 1) + %24 = sub i64 %15, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %25 = phi i64 [ 0, %exit__1 ], [ %31, %exiting__2 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %25) + %28 = bitcast i8* %27 to { double, double }** + %29 = load { double, double }*, { double, double }** %28, align 8 + %30 = bitcast { double, double }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %31 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsAsComplexPolar, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* 
}* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { double, %Array*, { %Array* }* }* + %34 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 2 + store double %tolerance, double* %34, align 8 + store %Array* %coefficientsAsComplexPolar, %Array** %35, align 8 + store { %Array* }* %qubits, { %Array* }** %36, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %33) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + %37 = sub i64 %15, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %38 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %38) + %41 = bitcast i8* %40 to { double, double }** + %42 = load { double, double }*, { double, double }** %41, align 8 + %43 = bitcast { double, double }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %38, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsAsComplexPolar, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + %45 = sub i64 %15, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %46 = phi i64 [ 0, %exit__3 ], [ %52, %exiting__4 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %46) + %49 = bitcast i8* %48 to { double, double }** + %50 = load { double, double }*, { double, double }** %49, align 8 + %51 = bitcast { double, double }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %51, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %52 = add i64 %46, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsAsComplexPolar, i32 -1) + %53 = sub i64 %15, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %54 = phi i64 [ 0, %exit__4 ], [ %60, %exiting__5 ] + %55 = icmp sle i64 %54, %53 + br i1 %55, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %56 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %54) + %57 = bitcast i8* %56 to { double, double }** + %58 = load { double, double }*, { double, double }** %57, align 8 + %59 = bitcast { double, double }* %58 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %59, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %60 = add i64 %54, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsAsComplexPolar, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__34__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 1 + %5 = load double, double* %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, double }* + %8 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store double %5, double* %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__ComplexPolar__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ 
%Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, double }* + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store double 0.000000e+00, double* %11, align 8 + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__35__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__20__FunctionTable, %Tuple* %8) + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__AbsD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %14 = call %Callable* @Microsoft__Quantum__Canon___ac50acd82a7e42128d811608cb927809_Compose__body(%Callable* %12, %Callable* %13) + %__qsVar0__coefficientsAsComplexPolar__ = call %Array* @Microsoft__Quantum__Arrays___31a44c6111824f8e8c14091708980cfb_Mapped__body(%Callable* %14, %Array* %coefficients) + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__) + %16 = sub i64 %15, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %17 = phi i64 [ 0, %entry ], [ %23, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %17) + %20 = bitcast i8* %19 to { double, double }** + %21 = load { double, double }*, { double, double }** %20, align 8 + %22 = bitcast { double, double }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %23 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 1) + %24 = sub i64 %15, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %25 = phi i64 [ 0, %exit__1 ], [ %31, %exiting__2 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %25) + %28 = bitcast i8* %27 to { double, double }** + %29 = load { double, double }*, { double, double }** %28, align 8 + %30 = bitcast { double, double }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %31 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { double, %Array*, { %Array* }* }* + %34 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { double, 
%Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 2 + store double %tolerance, double* %34, align 8 + store %Array* %__qsVar0__coefficientsAsComplexPolar__, %Array** %35, align 8 + store { %Array* }* %qubits, { %Array* }** %36, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %33) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + %37 = sub i64 %15, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %38 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %38) + %41 = bitcast i8* %40 to { double, double }** + %42 = load { double, double }*, { double, double }** %41, align 8 + %43 = bitcast { double, double }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %38, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + %45 = sub i64 %15, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %46 = phi i64 [ 0, %exit__3 ], [ %52, %exiting__4 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %46) + %49 = bitcast i8* %48 to { double, double }** + %50 = load { double, double }*, { double, double }** %49, align 8 + %51 = bitcast { double, double }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %51, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %52 = add i64 %46, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + %53 = sub i64 %15, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %54 = phi i64 [ 0, %exit__4 ], [ %60, %exiting__5 ] + %55 = icmp sle i64 %54, %53 + br i1 %55, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %54) + %57 = bitcast i8* %56 to { double, double }** + %58 = load { double, double }*, { double, double }** %57, 
align 8 + %59 = bitcast { double, double }* %58 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %59, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %60 = add i64 %54, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__35__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 1 + %5 = load double, double* %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, double }* + %8 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store double %5, double* %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__body(%Array* %coefficients, { %Array* }* %qubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__body(double 0.000000e+00, %Array* %coefficients, { %Array* }* %qubits) + %12 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %14 = icmp 
sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__adj(%Array* %coefficients, { %Array* }* %qubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__adj(double 0.000000e+00, %Array* %coefficients, { %Array* }* %qubits) + %12 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__ctl(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* 
}* %0, i32 0, i32 0 + %coefficients = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %4) + %7 = bitcast i8* %6 to { double, double }** + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %11 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %qubits = load { %Array* }*, { %Array* }** %11, align 8 + %12 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %16) + %19 = bitcast i8* %18 to { double, double }** + %20 = load { double, double }*, { double, double }** %19, align 8 + %21 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { double, %Array*, { %Array* }* }* + %25 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 1 + %27 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 2 + store double 0.000000e+00, double* %25, align 8 + store %Array* %coefficients, %Array** %26, align 8 + store { %Array* }* %qubits, { %Array* }** %27, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %24) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %28 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp 
sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + %36 = sub i64 %2, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %37 = phi i64 [ 0, %exit__3 ], [ %43, %exiting__4 ] + %38 = icmp sle i64 %37, %36 + br i1 %38, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %37) + %40 = bitcast i8* %39 to { double, double }** + %41 = load { double, double }*, { double, double }** %40, align 8 + %42 = bitcast { double, double }* %41 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %43 = add i64 %37, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__ctladj(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %coefficients = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %4) + %7 = bitcast i8* %6 to { double, double }** + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %11 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %qubits = load { %Array* }*, { %Array* }** %11, align 8 + %12 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array* }* %qubits to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %16) + %19 = bitcast i8* %18 to { double, double }** + %20 = load { double, double }*, { double, double }** %19, align 8 + %21 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { double, %Array*, { %Array* }* }* + %25 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 1 + %27 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 2 + store double 0.000000e+00, double* %25, align 8 + store %Array* %coefficients, %Array** %26, align 8 + store { %Array* }* %qubits, { %Array* }** %27, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %24) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %28 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + %36 = sub i64 %2, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %37 = phi i64 [ 0, %exit__3 ], [ %43, %exiting__4 ] + %38 = icmp sle i64 %37, %36 + br i1 %38, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %37) + %40 = bitcast i8* %39 to { double, double }** + %41 = load { double, double }*, { double, double }** %40, align 8 + %42 = bitcast { double, double }* %41 to %Tuple* + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %43 = add i64 %37, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__body(%Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__body(double 0.000000e+00, %Array* %coefficients, { %Array* }* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__adj(%Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__adj(double 0.000000e+00, %Array* %coefficients, { %Array* }* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__ctl(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %coefficients = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %qubits = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array*, { %Array* }* }* + %8 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 1 + %10 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 2 + store double 0.000000e+00, double* %8, align 8 + store %Array* %coefficients, %Array** %9, align 8 + store { %Array* }* %qubits, { %Array* }** %10, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %7) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__ctladj(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %coefficients = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %qubits = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array*, { %Array* }* }* + %8 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 1 + %10 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 2 + store double 0.000000e+00, double* %8, align 8 + store %Array* %coefficients, %Array** %9, 
align 8 + store { %Array* }* %qubits, { %Array* }** %10, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %7) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___ApplyJordanWignerClusterOperatorPQRSTerm____body({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %coeff = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %8 = bitcast i8* %7 to i64* + %p = load i64, i64* %8, align 4 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %10 = bitcast i8* %9 to i64* + %q = load i64, i64* %10, align 4 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 2) + %12 = bitcast i8* %11 to i64* + %r = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %14 = bitcast i8* %13 to i64* + %s = load i64, i64* %14, align 4 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %16 = bitcast i8* %15 to double* + %17 = load double, double* %16, align 8 + %18 = fmul double 1.250000e-01, %17 + %angle = fmul double %18, %stepSize + %19 = icmp eq i64 %p, %q + br i1 %19, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %entry + %20 = icmp eq i64 %p, %r + br 
label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %entry + %21 = phi i1 [ %19, %entry ], [ %20, %condFalse__1 ] + br i1 %21, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %condContinue__1 + %22 = icmp eq i64 %p, %s + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condContinue__1 + %23 = phi i1 [ %21, %condContinue__1 ], [ %22, %condFalse__2 ] + br i1 %23, label %condContinue__3, label %condFalse__3 + +condFalse__3: ; preds = %condContinue__2 + %24 = icmp eq i64 %q, %r + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condContinue__2 + %25 = phi i1 [ %23, %condContinue__2 ], [ %24, %condFalse__3 ] + br i1 %25, label %condContinue__4, label %condFalse__4 + +condFalse__4: ; preds = %condContinue__3 + %26 = icmp eq i64 %q, %s + br label %condContinue__4 + +condContinue__4: ; preds = %condFalse__4, %condContinue__3 + %27 = phi i1 [ %25, %condContinue__3 ], [ %26, %condFalse__4 ] + br i1 %27, label %condContinue__5, label %condFalse__5 + +condFalse__5: ; preds = %condContinue__4 + %28 = icmp eq i64 %r, %s + br label %condContinue__5 + +condContinue__5: ; preds = %condFalse__5, %condContinue__4 + %29 = phi i1 [ %27, %condContinue__4 ], [ %28, %condFalse__5 ] + br i1 %29, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__5 + %30 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @22, i32 0, i32 0)) + %31 = call %String* @__quantum__rt__int_to_string(i64 %p) + %32 = call %String* @__quantum__rt__string_concatenate(%String* %30, %String* %31) + call void @__quantum__rt__string_update_reference_count(%String* %30, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %31, i32 -1) + %33 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %34 = call %String* @__quantum__rt__string_concatenate(%String* %32, %String* %33) + call void @__quantum__rt__string_update_reference_count(%String* %32, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %33, i32 -1) + %35 = call %String* @__quantum__rt__int_to_string(i64 %q) + %36 = call %String* @__quantum__rt__string_concatenate(%String* %34, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %38 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %37) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %37, i32 -1) + %39 = call %String* @__quantum__rt__int_to_string(i64 %r) + %40 = call %String* @__quantum__rt__string_concatenate(%String* %38, %String* %39) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %42 = call %String* @__quantum__rt__string_concatenate(%String* %40, %String* %41) + call void @__quantum__rt__string_update_reference_count(%String* %40, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %41, i32 -1) + %43 = call %String* @__quantum__rt__int_to_string(i64 %s) + %44 = 
call %String* @__quantum__rt__string_concatenate(%String* %42, %String* %43) + call void @__quantum__rt__string_update_reference_count(%String* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %43, i32 -1) + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %46 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %46) + unreachable + +continue__1: ; preds = %condContinue__5 + %47 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 0) + %49 = bitcast i8* %48 to i2* + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 1) + %51 = bitcast i8* %50 to i2* + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 2) + %53 = bitcast i8* %52 to i2* + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 3) + %55 = bitcast i8* %54 to i2* + store i2 -1, i2* %49, align 1 + store i2 -1, i2* %51, align 1 + store i2 1, i2* %53, align 1 + store i2 -1, i2* %55, align 1 + %56 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 0) + %58 = bitcast i8* %57 to i2* + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 1) + %60 = bitcast i8* %59 to i2* + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 2) + %62 = bitcast i8* %61 to i2* + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 3) + %64 = bitcast i8* %63 to i2* + store i2 1, i2* %58, align 1 + store i2 1, i2* %60, align 1 + store i2 1, i2* %62, align 1 + store i2 -1, i2* %64, align 1 + %65 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 0) + %67 = bitcast i8* %66 to i2* + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 1) + %69 = bitcast i8* %68 to i2* + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 2) + %71 = bitcast i8* %70 to i2* + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 3) + %73 = bitcast i8* %72 to i2* + store i2 1, i2* %67, align 1 + store i2 -1, i2* %69, align 1 + store i2 -1, i2* %71, align 1 + store i2 -1, i2* %73, align 1 + %74 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 0) + %76 = bitcast i8* %75 to i2* + %77 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, 
i64 1) + %78 = bitcast i8* %77 to i2* + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 2) + %80 = bitcast i8* %79 to i2* + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 3) + %82 = bitcast i8* %81 to i2* + store i2 -1, i2* %76, align 1 + store i2 1, i2* %78, align 1 + store i2 -1, i2* %80, align 1 + store i2 -1, i2* %82, align 1 + %83 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 0) + %85 = bitcast i8* %84 to i2* + %86 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 1) + %87 = bitcast i8* %86 to i2* + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 2) + %89 = bitcast i8* %88 to i2* + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 3) + %91 = bitcast i8* %90 to i2* + store i2 1, i2* %85, align 1 + store i2 -1, i2* %87, align 1 + store i2 1, i2* %89, align 1 + store i2 1, i2* %91, align 1 + %92 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 0) + %94 = bitcast i8* %93 to i2* + %95 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 1) + %96 = bitcast i8* %95 to i2* + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 2) + %98 = bitcast i8* %97 to i2* + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 3) + %100 = bitcast i8* %99 to i2* + store i2 -1, i2* %94, align 1 + store i2 1, i2* %96, align 1 + store i2 1, i2* %98, align 1 + store i2 1, i2* %100, align 1 + %101 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 0) + %103 = bitcast i8* %102 to i2* + %104 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 1) + %105 = bitcast i8* %104 to i2* + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 2) + %107 = bitcast i8* %106 to i2* + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 3) + %109 = bitcast i8* %108 to i2* + store i2 -1, i2* %103, align 1 + store i2 -1, i2* %105, align 1 + store i2 -1, i2* %107, align 1 + store i2 1, i2* %109, align 1 + %110 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %111 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 0) + %112 = bitcast i8* %111 to i2* + %113 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 1) + %114 = bitcast i8* %113 to i2* + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 2) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 3) + %118 = bitcast i8* %117 to i2* + store i2 1, i2* %112, align 1 + store i2 1, i2* %114, align 1 + store i2 -1, i2* %116, align 1 + store i2 1, i2* %118, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %120 = bitcast i8* %119 to %Array** + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %122 = bitcast i8* %121 to %Array** + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 2) + %124 = bitcast i8* %123 to %Array** + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 3) + %126 = bitcast i8* %125 to %Array** + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, 
i64 4) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 5) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 6) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 7) + %134 = bitcast i8* %133 to %Array** + store %Array* %47, %Array** %120, align 8 + store %Array* %56, %Array** %122, align 8 + store %Array* %65, %Array** %124, align 8 + store %Array* %74, %Array** %126, align 8 + store %Array* %83, %Array** %128, align 8 + store %Array* %92, %Array** %130, align 8 + store %Array* %101, %Array** %132, align 8 + store %Array* %110, %Array** %134, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %135 = phi i64 [ 0, %continue__1 ], [ %140, %exiting__1 ] + %136 = icmp sle i64 %135, 7 + br i1 %136, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %135) + %138 = bitcast i8* %137 to %Array** + %139 = load %Array*, %Array** %138, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %139, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %140 = add i64 %135, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %141 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 0) + %143 = bitcast i8* %142 to i64* + %144 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 1) + %145 = bitcast i8* %144 to i64* + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 2) + %147 = bitcast i8* %146 to i64* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 3) + %149 = bitcast i8* %148 to i64* + store i64 %p, i64* %143, align 4 + store i64 %q, i64* %145, align 4 + store i64 %r, i64* %147, align 4 + store i64 %s, i64* %149, align 4 + %150 = call { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %141) + %151 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 0 + %sortedIndices = load %Array*, %Array** %151, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %sortedIndices, i32 1) + %152 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 1 + %signs = load %Array*, %Array** %152, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 1) + %153 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 2 + %globalSign = load double, double* %153, align 8 + %154 = call %Array* @Microsoft__Quantum__Arrays___348fb71df7bc448eb164e2c392c91e93_Zipped__body(%Array* %ops, %Array* %signs) + %155 = call i64 @__quantum__rt__array_get_size_1d(%Array* %154) + %156 = sub i64 %155, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %157 = phi i64 [ 0, %exit__1 ], [ %166, %exiting__2 ] + %158 = icmp sle i64 %157, %156 + br i1 %158, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %159 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %154, i64 %157) + %160 = bitcast i8* %159 to { %Array*, double }** + %161 = load { %Array*, double }*, { %Array*, double }** 
%160, align 8 + %162 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %161, i32 0, i32 0 + %op = load %Array*, %Array** %162, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %163 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %161, i32 0, i32 1 + %sign = load double, double* %163, align 8 + %164 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %pauliString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %164, %Array* %sortedIndices, %Array* %op) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + %165 = fmul double %globalSign, %sign + %theta = fmul double %165, %angle + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %pauliString, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %166 = add i64 %157, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %167 = phi i64 [ 0, %exit__2 ], [ %172, %exiting__3 ] + %168 = icmp sle i64 %167, 7 + br i1 %168, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %169 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %167) + %170 = bitcast i8* %169 to %Array** + %171 = load %Array*, %Array** %170, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %171, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %172 = add i64 %167, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %sortedIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %173 = phi i64 [ 0, %exit__3 ], [ %178, %exiting__4 ] + %174 = icmp sle i64 %173, 7 + br i1 %174, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %175 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %173) + %176 = bitcast i8* %175 to %Array** + %177 = load %Array*, %Array** %176, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %177, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %178 = add i64 %173, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %141, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %sortedIndices, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %signs, i32 -1) + %179 = bitcast { %Array*, %Array*, double }* %150 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %179, i32 -1) + %180 = sub i64 %155, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %181 = phi i64 [ 0, %exit__4 ], [ %189, %exiting__5 ] + %182 = icmp sle i64 %181, %180 + br i1 %182, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %183 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %154, i64 %181) + %184 = bitcast i8* %183 to { %Array*, double }** + %185 = load { %Array*, double }*, { %Array*, double }** %184, align 8 + %186 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %185, i32 0, i32 0 + %187 = load %Array*, %Array** %186, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %187, i32 -1) + %188 = bitcast { %Array*, double }* %185 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %188, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %189 = add i64 %181, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %154, i32 -1) + ret void +} + +define internal { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %indices) { +entry: + %sign = alloca double, align 8 + %signs = alloca %Array*, align 8 + %sorted = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %1 = bitcast i8* %0 to i64* + %p = load i64, i64* %1, align 4 + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 1) + %3 = bitcast i8* %2 to i64* + %q = load i64, i64* %3, align 4 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 2) + %5 = bitcast i8* %4 to i64* + %r = load i64, i64* %5, align 4 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 3) + %7 = bitcast i8* %6 to i64* + %s = load i64, i64* %7, align 4 + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 0) + %10 = bitcast i8* %9 to i64* + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 1) + %12 = bitcast i8* %11 to i64* + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 2) + %14 = bitcast i8* %13 to i64* + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 3) + %16 = bitcast i8* %15 to i64* + store i64 0, i64* %10, align 4 + store i64 0, i64* %12, align 4 + store i64 0, i64* %14, align 4 + store i64 0, i64* %16, align 4 + store %Array* %8, %Array** %sorted, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %17 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 0) + 
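; note: %17 is the eight-entry signs table, zero-filled below (one sign per Pauli string of the PQRS term); %sign starts at +1.0 and is negated when p > q, when r > s, and when min(p, q) > min(r, s), tracking the ordering parity of the indices. +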
%19 = bitcast i8* %18 to double* + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 1) + %21 = bitcast i8* %20 to double* + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 2) + %23 = bitcast i8* %22 to double* + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 3) + %25 = bitcast i8* %24 to double* + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 4) + %27 = bitcast i8* %26 to double* + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 5) + %29 = bitcast i8* %28 to double* + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 6) + %31 = bitcast i8* %30 to double* + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 7) + %33 = bitcast i8* %32 to double* + store double 0.000000e+00, double* %19, align 8 + store double 0.000000e+00, double* %21, align 8 + store double 0.000000e+00, double* %23, align 8 + store double 0.000000e+00, double* %25, align 8 + store double 0.000000e+00, double* %27, align 8 + store double 0.000000e+00, double* %29, align 8 + store double 0.000000e+00, double* %31, align 8 + store double 0.000000e+00, double* %33, align 8 + store %Array* %17, %Array** %signs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + store double 1.000000e+00, double* %sign, align 8 + %34 = icmp sgt i64 %p, %q + br i1 %34, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + store double -1.000000e+00, double* %sign, align 8 + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + %35 = icmp sgt i64 %r, %s + br i1 %35, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + %36 = load double, double* %sign, align 8 + %37 = fmul double %36, -1.000000e+00 + store double %37, double* %sign, align 8 + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 + %38 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 0) + %40 = bitcast i8* %39 to i64* + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 1) + %42 = bitcast i8* %41 to i64* + store i64 %p, i64* %40, align 4 + store i64 %q, i64* %42, align 4 + %43 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %38) + %44 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 0) + %46 = bitcast i8* %45 to i64* + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 1) + %48 = bitcast i8* %47 to i64* + store i64 %r, i64* %46, align 4 + store i64 %s, i64* %48, align 4 + %49 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %44) + %50 = icmp sgt i64 %43, %49 + call void @__quantum__rt__array_update_reference_count(%Array* %38, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 -1) + br i1 %50, label %then0__3, label %else__1 + +then0__3: ; preds = %continue__2 + %51 = load double, double* %sign, align 8 + %52 = fmul double %51, -1.000000e+00 + store double %52, double* %sign, align 8 + %53 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 0) + %55 = bitcast i8* %54 to i64* + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 1) + %57 = bitcast i8* %56 to i64* + store i64 %r, i64* %55, align 4 + store i64 %s, i64* %57, align 4 + %58 = call i64 
@Microsoft__Quantum__Math__Min__body(%Array* %53) + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1) + %59 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %59, i64 0) + %61 = bitcast i8* %60 to i64* + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %59, i64 1) + %63 = bitcast i8* %62 to i64* + store i64 %r, i64* %61, align 4 + store i64 %s, i64* %63, align 4 + %64 = call i64 @Microsoft__Quantum__Math__Max__body(%Array* %59) + call void @__quantum__rt__array_update_reference_count(%Array* %59, i32 -1) + %65 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 0) + %67 = bitcast i8* %66 to i64* + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 1) + %69 = bitcast i8* %68 to i64* + store i64 %p, i64* %67, align 4 + store i64 %q, i64* %69, align 4 + %70 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %65) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + %71 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %71, i64 0) + %73 = bitcast i8* %72 to i64* + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %71, i64 1) + %75 = bitcast i8* %74 to i64* + store i64 %p, i64* %73, align 4 + store i64 %q, i64* %75, align 4 + %76 = call i64 @Microsoft__Quantum__Math__Max__body(%Array* %71) + call void @__quantum__rt__array_update_reference_count(%Array* %71, i32 -1) + %77 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 0) + %79 = bitcast i8* %78 to i64* + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 1) + %81 = bitcast i8* %80 to i64* + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 2) + %83 = bitcast i8* %82 to i64* + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 3) + %85 = bitcast i8* %84 to i64* + store i64 %58, i64* %79, align 4 + store i64 %64, i64* %81, align 4 + store i64 %70, i64* %83, align 4 + store i64 %76, i64* %85, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %77, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 -1) + store %Array* %77, %Array** %sorted, align 8 + br label %continue__3 + +else__1: ; preds = %continue__2 + %86 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %87 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %86, i64 0) + %88 = bitcast i8* %87 to i64* + %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %86, i64 1) + %90 = bitcast i8* %89 to i64* + store i64 %p, i64* %88, align 4 + store i64 %q, i64* %90, align 4 + %91 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %86) + call void @__quantum__rt__array_update_reference_count(%Array* %86, i32 -1) + %92 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 0) + %94 = bitcast i8* %93 to i64* + %95 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 1) + %96 = bitcast i8* %95 to i64* + store i64 %p, i64* %94, align 4 + store i64 %q, i64* %96, align 4 + %97 = call i64 @Microsoft__Quantum__Math__Max__body(%Array* %92) + call void 
@__quantum__rt__array_update_reference_count(%Array* %92, i32 -1) + %98 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %98, i64 0) + %100 = bitcast i8* %99 to i64* + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %98, i64 1) + %102 = bitcast i8* %101 to i64* + store i64 %r, i64* %100, align 4 + store i64 %s, i64* %102, align 4 + %103 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %98) + call void @__quantum__rt__array_update_reference_count(%Array* %98, i32 -1) + %104 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %105 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %104, i64 0) + %106 = bitcast i8* %105 to i64* + %107 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %104, i64 1) + %108 = bitcast i8* %107 to i64* + store i64 %r, i64* %106, align 4 + store i64 %s, i64* %108, align 4 + %109 = call i64 @Microsoft__Quantum__Math__Max__body(%Array* %104) + call void @__quantum__rt__array_update_reference_count(%Array* %104, i32 -1) + %110 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %111 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 0) + %112 = bitcast i8* %111 to i64* + %113 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 1) + %114 = bitcast i8* %113 to i64* + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 2) + %116 = bitcast i8* %115 to i64* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 3) + %118 = bitcast i8* %117 to i64* + store i64 %91, i64* %112, align 4 + store i64 %97, i64* %114, align 4 + store i64 %103, i64* %116, align 4 + store i64 %109, i64* %118, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %110, i32 1) + %119 = load %Array*, %Array** %sorted, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %119, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %119, i32 -1) + store %Array* %110, %Array** %sorted, align 8 + br label %continue__3 + +continue__3: ; preds = %else__1, %then0__3 + %120 = load %Array*, %Array** %sorted, align 8 + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %120, i64 0) + %122 = bitcast i8* %121 to i64* + %p1 = load i64, i64* %122, align 4 + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %120, i64 1) + %124 = bitcast i8* %123 to i64* + %q1 = load i64, i64* %124, align 4 + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %120, i64 2) + %126 = bitcast i8* %125 to i64* + %r1 = load i64, i64* %126, align 4 + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %120, i64 3) + %128 = bitcast i8* %127 to i64* + %s1 = load i64, i64* %128, align 4 + %129 = icmp slt i64 %q1, %r1 + br i1 %129, label %then0__4, label %test1__1 + +then0__4: ; preds = %continue__3 + %130 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %130, i64 0) + %132 = bitcast i8* %131 to i64* + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %130, i64 1) + %134 = bitcast i8* %133 to i64* + %135 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %130, i64 2) + %136 = bitcast i8* %135 to i64* + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %130, i64 3) + %138 = bitcast i8* %137 to i64* + store i64 %p1, i64* %132, align 4 + store i64 %q1, i64* %134, align 4 + store i64 %r1, i64* %136, align 4 + 
store i64 %s1, i64* %138, align 4 + %139 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %140 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 0) + %141 = bitcast i8* %140 to double* + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 1) + %143 = bitcast i8* %142 to double* + %144 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 2) + %145 = bitcast i8* %144 to double* + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 3) + %147 = bitcast i8* %146 to double* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 4) + %149 = bitcast i8* %148 to double* + %150 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 5) + %151 = bitcast i8* %150 to double* + %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 6) + %153 = bitcast i8* %152 to double* + %154 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 7) + %155 = bitcast i8* %154 to double* + store double 1.000000e+00, double* %141, align 8 + store double -1.000000e+00, double* %143, align 8 + store double -1.000000e+00, double* %145, align 8 + store double -1.000000e+00, double* %147, align 8 + store double 1.000000e+00, double* %149, align 8 + store double 1.000000e+00, double* %151, align 8 + store double 1.000000e+00, double* %153, align 8 + store double -1.000000e+00, double* %155, align 8 + %156 = load double, double* %sign, align 8 + %157 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, double }* getelementptr ({ %Array*, %Array*, double }, { %Array*, %Array*, double }* null, i32 1) to i64)) + %158 = bitcast %Tuple* %157 to { %Array*, %Array*, double }* + %159 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %158, i32 0, i32 0 + %160 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %158, i32 0, i32 1 + %161 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %158, i32 0, i32 2 + store %Array* %130, %Array** %159, align 8 + store %Array* %139, %Array** %160, align 8 + store double %156, double* %161, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + ret { %Array*, %Array*, double }* %158 + +test1__1: ; preds = %continue__3 + %162 = icmp sgt i64 %q1, %r1 + br i1 %162, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %test1__1 + %163 = icmp slt i64 %q1, %s1 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %test1__1 + %164 = phi i1 [ %163, %condTrue__1 ], [ %162, %test1__1 ] + br i1 %164, label %then1__1, label %test2__1 + +then1__1: ; preds = %condContinue__1 + %165 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %166 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 0) + %167 = bitcast i8* %166 to i64* + %168 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 1) + %169 = bitcast i8* %168 to i64* + %170 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 2) + %171 = bitcast i8* %170 to i64* + %172 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 3) + 
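; note: this then1__1 branch handles the interleaved ordering r < q < s, so the canonical index order stored below is (p, r, q, s) with its own eight-entry sign table. +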
%173 = bitcast i8* %172 to i64* + store i64 %p1, i64* %167, align 4 + store i64 %r1, i64* %169, align 4 + store i64 %q1, i64* %171, align 4 + store i64 %s1, i64* %173, align 4 + %174 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %175 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 0) + %176 = bitcast i8* %175 to double* + %177 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 1) + %178 = bitcast i8* %177 to double* + %179 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 2) + %180 = bitcast i8* %179 to double* + %181 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 3) + %182 = bitcast i8* %181 to double* + %183 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 4) + %184 = bitcast i8* %183 to double* + %185 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 5) + %186 = bitcast i8* %185 to double* + %187 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 6) + %188 = bitcast i8* %187 to double* + %189 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 7) + %190 = bitcast i8* %189 to double* + store double -1.000000e+00, double* %176, align 8 + store double -1.000000e+00, double* %178, align 8 + store double -1.000000e+00, double* %180, align 8 + store double 1.000000e+00, double* %182, align 8 + store double -1.000000e+00, double* %184, align 8 + store double 1.000000e+00, double* %186, align 8 + store double 1.000000e+00, double* %188, align 8 + store double 1.000000e+00, double* %190, align 8 + %191 = load double, double* %sign, align 8 + %192 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, double }* getelementptr ({ %Array*, %Array*, double }, { %Array*, %Array*, double }* null, i32 1) to i64)) + %193 = bitcast %Tuple* %192 to { %Array*, %Array*, double }* + %194 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %193, i32 0, i32 0 + %195 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %193, i32 0, i32 1 + %196 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %193, i32 0, i32 2 + store %Array* %165, %Array** %194, align 8 + store %Array* %174, %Array** %195, align 8 + store double %191, double* %196, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + ret { %Array*, %Array*, double }* %193 + +test2__1: ; preds = %condContinue__1 + %197 = icmp sgt i64 %q1, %r1 + br i1 %197, label %condTrue__2, label %condContinue__2 + +condTrue__2: ; preds = %test2__1 + %198 = icmp sgt i64 %q1, %s1 + br label %condContinue__2 + +condContinue__2: ; preds = %condTrue__2, %test2__1 + %199 = phi i1 [ %198, %condTrue__2 ], [ %197, %test2__1 ] + br i1 %199, label %then2__1, label %else__2 + +then2__1: ; preds = %condContinue__2 + %200 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %201 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 0) + %202 = bitcast i8* %201 to i64* + %203 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 1) + %204 = bitcast i8* %203 to i64* + %205 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 2) + %206 = bitcast i8* %205 to i64* + %207 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 3) + %208 = bitcast i8* %207 to i64* + store i64 %p1, i64* %202, align 4 + store i64 %r1, i64* %204, align 4 + store i64 %s1, i64* %206, align 4 + store i64 %q1, i64* %208, align 4 + %209 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %210 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 0) + %211 = bitcast i8* %210 to double* + %212 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 1) + %213 = bitcast i8* %212 to double* + %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 2) + %215 = bitcast i8* %214 to double* + %216 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 3) + %217 = bitcast i8* %216 to double* + %218 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 4) + %219 = bitcast i8* %218 to double* + %220 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 5) + %221 = bitcast i8* %220 to double* + %222 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 6) + %223 = bitcast i8* %222 to double* + %224 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 7) + %225 = bitcast i8* %224 to double* + store double 1.000000e+00, double* %211, align 8 + store double 1.000000e+00, double* %213, align 8 + store double -1.000000e+00, double* %215, align 8 + store double 1.000000e+00, double* %217, align 8 + store double -1.000000e+00, double* %219, align 8 + store double 1.000000e+00, double* %221, align 8 + store double -1.000000e+00, double* %223, align 8 + store double -1.000000e+00, double* %225, align 8 + %226 = load double, double* %sign, align 8 + %227 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, double }* getelementptr ({ %Array*, %Array*, double }, { %Array*, %Array*, double }* null, i32 1) to i64)) + %228 = bitcast %Tuple* %227 to { %Array*, %Array*, double }* + %229 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %228, i32 0, i32 0 + %230 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %228, i32 0, i32 1 + %231 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %228, i32 0, i32 2 + store %Array* %200, %Array** %229, align 8 + store %Array* %209, %Array** %230, align 8 + store double %226, double* %231, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + ret { %Array*, %Array*, double }* %228 + +else__2: ; preds = %condContinue__2 + %232 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @29, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__fail(%String* 
%232) + unreachable + +continue__4: ; No predecessors! + unreachable +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %nFermions, %Array* %idxFermions, %Array* %pauliReplacements) { +entry: + %pauliString = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliReplacements, i32 1) + %0 = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliZString__body(i64 %nFermions, %Array* %idxFermions) + store %Array* %0, %Array** %pauliString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 1) + %1 = call %Range @Microsoft__Quantum__Arrays___f06ffffa287a47609acf501f61b4d290_IndexRange__body(%Array* %idxFermions) + %2 = extractvalue %Range %1, 0 + %3 = extractvalue %Range %1, 1 + %4 = extractvalue %Range %1, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %5 = icmp sgt i64 %3, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idx = phi i64 [ %2, %preheader__1 ], [ %17, %exiting__1 ] + %6 = icmp sle i64 %idx, %4 + %7 = icmp sge i64 %idx, %4 + %8 = select i1 %5, i1 %6, i1 %7 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 %idx) + %10 = bitcast i8* %9 to i64* + %idxFermion = load i64, i64* %10, align 4 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %pauliReplacements, i64 %idx) + %12 = bitcast i8* %11 to i2* + %op = load i2, i2* %12, align 1 + %13 = load %Array*, %Array** %pauliString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 -1) + %14 = call %Array* @__quantum__rt__array_copy(%Array* %13, i1 false) + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %14, i64 %idxFermion) + %16 = bitcast i8* %15 to i2* + store i2 %op, i2* %16, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + store %Array* %14, %Array** %pauliString, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %idx, %3 + br label %header__1 + +exit__1: ; preds = %header__1 + %18 = load %Array*, %Array** %pauliString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliReplacements, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 -1) + ret %Array* %18 +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___ApplyJordanWignerClusterOperatorPQRSTerm____adj({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* 
%__qsVar1__coeff__, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %8 = bitcast i8* %7 to i64* + %__qsVar3__p__ = load i64, i64* %8, align 4 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %10 = bitcast i8* %9 to i64* + %__qsVar4__q__ = load i64, i64* %10, align 4 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 2) + %12 = bitcast i8* %11 to i64* + %__qsVar5__r__ = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %14 = bitcast i8* %13 to i64* + %__qsVar6__s__ = load i64, i64* %14, align 4 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %16 = bitcast i8* %15 to double* + %17 = load double, double* %16, align 8 + %18 = fmul double 1.250000e-01, %17 + %__qsVar7__angle__ = fmul double %18, %stepSize + %19 = icmp eq i64 %__qsVar3__p__, %__qsVar4__q__ + br i1 %19, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %entry + %20 = icmp eq i64 %__qsVar3__p__, %__qsVar5__r__ + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %entry + %21 = phi i1 [ %19, %entry ], [ %20, %condFalse__1 ] + br i1 %21, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %condContinue__1 + %22 = icmp eq i64 %__qsVar3__p__, %__qsVar6__s__ + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condContinue__1 + %23 = phi i1 [ %21, %condContinue__1 ], [ %22, %condFalse__2 ] + br i1 %23, label %condContinue__3, label %condFalse__3 + +condFalse__3: ; preds = %condContinue__2 + %24 = icmp eq i64 %__qsVar4__q__, %__qsVar5__r__ + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condContinue__2 + %25 = phi i1 [ %23, %condContinue__2 ], [ %24, %condFalse__3 ] + br i1 %25, label %condContinue__4, label %condFalse__4 + +condFalse__4: ; preds = %condContinue__3 + %26 = icmp eq i64 %__qsVar4__q__, %__qsVar6__s__ + br label %condContinue__4 + +condContinue__4: ; preds = %condFalse__4, %condContinue__3 + %27 = phi i1 [ %25, %condContinue__3 ], [ %26, %condFalse__4 ] + br i1 %27, label %condContinue__5, label %condFalse__5 + +condFalse__5: ; preds = %condContinue__4 + %28 = icmp eq i64 %__qsVar5__r__, %__qsVar6__s__ + br label %condContinue__5 + +condContinue__5: ; preds = %condFalse__5, %condContinue__4 + %29 = phi i1 [ %27, %condContinue__4 ], [ %28, %condFalse__5 ] + br i1 %29, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__5 + %30 = call %String* 
@__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @22, i32 0, i32 0)) + %31 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar3__p__) + %32 = call %String* @__quantum__rt__string_concatenate(%String* %30, %String* %31) + call void @__quantum__rt__string_update_reference_count(%String* %30, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %31, i32 -1) + %33 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %34 = call %String* @__quantum__rt__string_concatenate(%String* %32, %String* %33) + call void @__quantum__rt__string_update_reference_count(%String* %32, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %33, i32 -1) + %35 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar4__q__) + %36 = call %String* @__quantum__rt__string_concatenate(%String* %34, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %38 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %37) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %37, i32 -1) + %39 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar5__r__) + %40 = call %String* @__quantum__rt__string_concatenate(%String* %38, %String* %39) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %42 = call %String* @__quantum__rt__string_concatenate(%String* %40, %String* %41) + call void @__quantum__rt__string_update_reference_count(%String* %40, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %41, i32 -1) + %43 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar6__s__) + %44 = call %String* @__quantum__rt__string_concatenate(%String* %42, %String* %43) + call void @__quantum__rt__string_update_reference_count(%String* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %43, i32 -1) + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %46 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__fail(%String* %46) + unreachable + +continue__1: ; preds = %condContinue__5 + %47 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 0) + %49 = bitcast i8* %48 to i2* + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 1) + %51 = bitcast i8* %50 to i2* + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 2) + %53 = bitcast i8* %52 to i2* + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 3) + %55 = bitcast i8* %54 to i2* + store i2 -1, i2* %49, align 1 + store i2 -1, i2* %51, align 1 + store i2 1, i2* %53, align 1 + store i2 -1, i2* %55, align 1 + %56 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 0) + %58 = bitcast i8* %57 to i2* + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 1) + %60 = bitcast i8* %59 to i2* + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 2) + %62 = bitcast i8* %61 to i2* + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 3) + %64 = bitcast i8* %63 to i2* + store i2 1, i2* %58, align 1 + store i2 1, i2* %60, align 1 + store i2 1, i2* %62, align 1 + store i2 -1, i2* %64, align 1 + %65 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 0) + %67 = bitcast i8* %66 to i2* + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 1) + %69 = bitcast i8* %68 to i2* + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 2) + %71 = bitcast i8* %70 to i2* + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 3) + %73 = bitcast i8* %72 to i2* + store i2 1, i2* %67, align 1 + store i2 -1, i2* %69, align 1 + store i2 -1, i2* %71, align 1 + store i2 -1, i2* %73, align 1 + %74 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 0) + %76 = bitcast i8* %75 to i2* + %77 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 1) + %78 = bitcast i8* %77 to i2* + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 2) + %80 = bitcast i8* %79 to i2* + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 3) + %82 = bitcast i8* %81 to i2* + store i2 -1, i2* %76, align 1 + store i2 1, i2* %78, align 1 + store i2 -1, i2* %80, align 1 + store i2 -1, i2* %82, align 1 + %83 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 0) + %85 = bitcast i8* %84 to i2* + %86 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 1) + %87 = bitcast i8* %86 to i2* + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 2) + %89 = bitcast i8* %88 to i2* + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 3) + %91 = bitcast i8* %90 to i2* + store i2 1, i2* %85, align 1 + store i2 -1, i2* %87, align 1 + store i2 1, i2* %89, align 1 + store i2 1, i2* %91, align 1 + %92 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 0) + %94 = bitcast i8* %93 to i2* + %95 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 1) + %96 = bitcast i8* %95 to i2* + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 2) + %98 = bitcast i8* %97 to i2* + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 3) + %100 = bitcast i8* %99 to i2* + store i2 -1, i2* %94, align 1 + store i2 1, i2* %96, align 1 + store i2 1, i2* %98, align 1 + store i2 1, i2* %100, align 1 + %101 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 0) + %103 = bitcast i8* %102 to i2* + %104 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 1) + %105 = bitcast i8* %104 to i2* + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 2) + %107 = bitcast i8* %106 to i2* + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 3) + %109 = bitcast i8* %108 to i2* + store i2 -1, i2* %103, align 1 + store i2 -1, i2* %105, align 1 + store i2 -1, i2* %107, align 1 + store i2 1, i2* %109, align 1 + %110 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %111 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 0) + %112 = bitcast i8* %111 to i2* + %113 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 1) + %114 = bitcast i8* %113 to i2* + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 2) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 3) + %118 = bitcast i8* %117 to i2* + store i2 1, i2* %112, align 1 + store i2 1, i2* %114, align 1 + store i2 -1, i2* %116, align 1 + store i2 1, i2* %118, align 1 + %__qsVar10__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 0) + %120 = bitcast i8* %119 to %Array** + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 1) + %122 = bitcast i8* %121 to %Array** + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 2) + %124 = bitcast i8* %123 to %Array** + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 3) + %126 = bitcast i8* %125 to %Array** + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 4) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 5) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 6) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 7) + %134 = bitcast i8* %133 to %Array** + store %Array* %47, %Array** %120, align 8 + store %Array* %56, %Array** %122, align 8 + store %Array* %65, %Array** %124, align 8 + store %Array* %74, %Array** %126, align 8 + store %Array* %83, %Array** %128, align 8 + store %Array* %92, %Array** %130, align 8 + store %Array* %101, %Array** %132, align 8 + store %Array* %110, %Array** %134, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %135 = phi i64 [ 0, %continue__1 ], [ %140, %exiting__1 ] + %136 = icmp sle i64 %135, 7 + br i1 %136, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 
%135) + %138 = bitcast i8* %137 to %Array** + %139 = load %Array*, %Array** %138, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %139, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %140 = add i64 %135, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__ops__, i32 1) + %141 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 0) + %143 = bitcast i8* %142 to i64* + %144 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 1) + %145 = bitcast i8* %144 to i64* + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 2) + %147 = bitcast i8* %146 to i64* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 3) + %149 = bitcast i8* %148 to i64* + store i64 %__qsVar3__p__, i64* %143, align 4 + store i64 %__qsVar4__q__, i64* %145, align 4 + store i64 %__qsVar5__r__, i64* %147, align 4 + store i64 %__qsVar6__s__, i64* %149, align 4 + %150 = call { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %141) + %151 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 0 + %__qsVar11__sortedIndices__ = load %Array*, %Array** %151, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar11__sortedIndices__, i32 1) + %152 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 1 + %__qsVar12__signs__ = load %Array*, %Array** %152, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar12__signs__, i32 1) + %153 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 2 + %__qsVar13__globalSign__ = load double, double* %153, align 8 + %154 = call %Array* @Microsoft__Quantum__Arrays___348fb71df7bc448eb164e2c392c91e93_Zipped__body(%Array* %__qsVar10__ops__, %Array* %__qsVar12__signs__) + %155 = call %Array* @Microsoft__Quantum__Arrays___348fb71df7bc448eb164e2c392c91e93_Zipped__body(%Array* %__qsVar10__ops__, %Array* %__qsVar12__signs__) + %156 = call i64 @__quantum__rt__array_get_size_1d(%Array* %155) + %157 = sub i64 %156, 1 + %158 = insertvalue %Range zeroinitializer, i64 %157, 0 + %159 = insertvalue %Range %158, i64 -1, 1 + %160 = insertvalue %Range %159, i64 0, 2 + %161 = call %Array* @__quantum__rt__array_slice_1d(%Array* %154, %Range %160, i1 true) + %162 = call i64 @__quantum__rt__array_get_size_1d(%Array* %161) + %163 = sub i64 %162, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %164 = phi i64 [ 0, %exit__1 ], [ %173, %exiting__2 ] + %165 = icmp sle i64 %164, %163 + br i1 %165, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %166 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %161, i64 %164) + %167 = bitcast i8* %166 to { %Array*, double }** + %168 = load { %Array*, double }*, { %Array*, double }** %167, align 8 + %169 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %168, i32 0, i32 0 + %__qsVar14__op__ = load %Array*, %Array** %169, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar14__op__, i32 1) + %170 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %168, i32 0, i32 1 + %__qsVar15__sign__ = load double, double* %170, align 8 + %171 = call i64 
@__quantum__rt__array_get_size_1d(%Array* %qubits) + %__qsVar16__pauliString__ = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %171, %Array* %__qsVar11__sortedIndices__, %Array* %__qsVar14__op__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 1) + %172 = fmul double %__qsVar13__globalSign__, %__qsVar15__sign__ + %theta = fmul double %172, %__qsVar7__angle__ + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %__qsVar16__pauliString__, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar14__op__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar16__pauliString__, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %173 = add i64 %164, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %174 = phi i64 [ 0, %exit__2 ], [ %179, %exiting__3 ] + %175 = icmp sle i64 %174, 7 + br i1 %175, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %176 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %174) + %177 = bitcast i8* %176 to %Array** + %178 = load %Array*, %Array** %177, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %178, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %179 = add i64 %174, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__ops__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar11__sortedIndices__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar12__signs__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %180 = phi i64 [ 0, %exit__3 ], [ %185, %exiting__4 ] + %181 = icmp sle i64 %180, 7 + br i1 %181, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %182 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %180) + %183 = bitcast i8* %182 to %Array** + %184 = load %Array*, %Array** %183, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %184, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = 
%body__4 + %185 = add i64 %180, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %141, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar11__sortedIndices__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar12__signs__, i32 -1) + %186 = bitcast { %Array*, %Array*, double }* %150 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %186, i32 -1) + %187 = call i64 @__quantum__rt__array_get_size_1d(%Array* %154) + %188 = sub i64 %187, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %189 = phi i64 [ 0, %exit__4 ], [ %197, %exiting__5 ] + %190 = icmp sle i64 %189, %188 + br i1 %190, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %191 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %154, i64 %189) + %192 = bitcast i8* %191 to { %Array*, double }** + %193 = load { %Array*, double }*, { %Array*, double }** %192, align 8 + %194 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %193, i32 0, i32 0 + %195 = load %Array*, %Array** %194, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %195, i32 -1) + %196 = bitcast { %Array*, double }* %193 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %196, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %197 = add i64 %189, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %154, i32 -1) + %198 = sub i64 %156, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %199 = phi i64 [ 0, %exit__5 ], [ %207, %exiting__6 ] + %200 = icmp sle i64 %199, %198 + br i1 %200, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %201 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %155, i64 %199) + %202 = bitcast i8* %201 to { %Array*, double }** + %203 = load { %Array*, double }*, { %Array*, double }** %202, align 8 + %204 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %203, i32 0, i32 0 + %205 = load %Array*, %Array** %204, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %205, i32 -1) + %206 = bitcast { %Array*, double }* %203 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %206, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %207 = add i64 %199, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %155, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %161, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___ApplyJordanWignerClusterOperatorPQRSTerm____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, 
%Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %coeff = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %12 = bitcast i8* %11 to i64* + %p = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %14 = bitcast i8* %13 to i64* + %q = load i64, i64* %14, align 4 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 2) + %16 = bitcast i8* %15 to i64* + %r = load i64, i64* %16, align 4 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %18 = bitcast i8* %17 to i64* + %s = load i64, i64* %18, align 4 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %20 = bitcast i8* %19 to double* + %21 = load double, double* %20, align 8 + %22 = fmul double 1.250000e-01, %21 + %angle = fmul double %22, %stepSize + %23 = icmp eq i64 %p, %q + br i1 %23, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %entry + %24 = icmp eq i64 %p, %r + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %entry + %25 = phi i1 [ %23, %entry ], [ %24, %condFalse__1 ] + br i1 %25, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %condContinue__1 + %26 = icmp eq i64 %p, %s + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condContinue__1 + %27 = phi i1 [ %25, %condContinue__1 ], [ %26, %condFalse__2 ] + br i1 %27, label %condContinue__3, label %condFalse__3 + +condFalse__3: ; preds = %condContinue__2 + %28 = icmp eq i64 %q, %r + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condContinue__2 + %29 = phi i1 [ %27, %condContinue__2 ], [ %28, %condFalse__3 ] + br i1 %29, label %condContinue__4, label %condFalse__4 + +condFalse__4: ; preds = %condContinue__3 + %30 = icmp 
eq i64 %q, %s + br label %condContinue__4 + +condContinue__4: ; preds = %condFalse__4, %condContinue__3 + %31 = phi i1 [ %29, %condContinue__3 ], [ %30, %condFalse__4 ] + br i1 %31, label %condContinue__5, label %condFalse__5 + +condFalse__5: ; preds = %condContinue__4 + %32 = icmp eq i64 %r, %s + br label %condContinue__5 + +condContinue__5: ; preds = %condFalse__5, %condContinue__4 + %33 = phi i1 [ %31, %condContinue__4 ], [ %32, %condFalse__5 ] + br i1 %33, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__5 + %34 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @22, i32 0, i32 0)) + %35 = call %String* @__quantum__rt__int_to_string(i64 %p) + %36 = call %String* @__quantum__rt__string_concatenate(%String* %34, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %38 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %37) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %37, i32 -1) + %39 = call %String* @__quantum__rt__int_to_string(i64 %q) + %40 = call %String* @__quantum__rt__string_concatenate(%String* %38, %String* %39) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %42 = call %String* @__quantum__rt__string_concatenate(%String* %40, %String* %41) + call void @__quantum__rt__string_update_reference_count(%String* %40, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %41, i32 -1) + %43 = call %String* @__quantum__rt__int_to_string(i64 %r) + %44 = call %String* @__quantum__rt__string_concatenate(%String* %42, %String* %43) + call void @__quantum__rt__string_update_reference_count(%String* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %43, i32 -1) + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %46 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + %47 = call %String* @__quantum__rt__int_to_string(i64 %s) + %48 = call %String* @__quantum__rt__string_concatenate(%String* %46, %String* %47) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + %49 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %50 = call %String* @__quantum__rt__string_concatenate(%String* %48, %String* %49) + call void @__quantum__rt__string_update_reference_count(%String* %48, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %49, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %50) + unreachable + +continue__1: ; preds = %condContinue__5 + %51 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 0) + %53 = bitcast i8* %52 to i2* + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 1) + %55 = bitcast i8* %54 to i2* + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 2) + %57 = bitcast i8* %56 to i2* + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 3) + %59 = bitcast i8* %58 to i2* + store i2 -1, i2* %53, align 1 + store i2 -1, i2* %55, align 1 + store i2 1, i2* %57, align 1 + store i2 -1, i2* %59, align 1 + %60 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 0) + %62 = bitcast i8* %61 to i2* + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 1) + %64 = bitcast i8* %63 to i2* + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 2) + %66 = bitcast i8* %65 to i2* + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 3) + %68 = bitcast i8* %67 to i2* + store i2 1, i2* %62, align 1 + store i2 1, i2* %64, align 1 + store i2 1, i2* %66, align 1 + store i2 -1, i2* %68, align 1 + %69 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 0) + %71 = bitcast i8* %70 to i2* + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 1) + %73 = bitcast i8* %72 to i2* + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 2) + %75 = bitcast i8* %74 to i2* + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 3) + %77 = bitcast i8* %76 to i2* + store i2 1, i2* %71, align 1 + store i2 -1, i2* %73, align 1 + store i2 -1, i2* %75, align 1 + store i2 -1, i2* %77, align 1 + %78 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 0) + %80 = bitcast i8* %79 to i2* + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 1) + %82 = bitcast i8* %81 to i2* + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 2) + %84 = bitcast i8* %83 to i2* + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 3) + %86 = bitcast i8* %85 to i2* + store i2 -1, i2* %80, align 1 + store i2 1, i2* %82, align 1 + store i2 -1, i2* %84, align 1 + store i2 -1, i2* %86, align 1 + %87 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 0) + %89 = bitcast i8* %88 to i2* + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 1) + %91 = bitcast i8* %90 to i2* + %92 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 2) + %93 = bitcast i8* %92 to i2* + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 3) + %95 = bitcast i8* %94 to i2* + store i2 1, i2* %89, align 1 + store i2 -1, i2* %91, align 1 + store i2 1, i2* %93, align 1 + store i2 1, i2* %95, align 1 + %96 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 0) + %98 = bitcast i8* %97 to i2* + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 1) + %100 = bitcast i8* %99 to i2* + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 2) + %102 = bitcast i8* %101 to i2* + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 3) + %104 = bitcast i8* %103 to i2* + store i2 -1, i2* %98, align 1 + store i2 1, i2* %100, align 1 + store i2 1, i2* %102, align 1 + store i2 1, i2* %104, align 1 + %105 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 0) + %107 = bitcast i8* %106 to i2* + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 1) + %109 = bitcast i8* %108 to i2* + %110 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 2) + %111 = bitcast i8* %110 to i2* + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 3) + %113 = bitcast i8* %112 to i2* + store i2 -1, i2* %107, align 1 + store i2 -1, i2* %109, align 1 + store i2 -1, i2* %111, align 1 + store i2 1, i2* %113, align 1 + %114 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 0) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 1) + %118 = bitcast i8* %117 to i2* + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 2) + %120 = bitcast i8* %119 to i2* + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 3) + %122 = bitcast i8* %121 to i2* + store i2 1, i2* %116, align 1 + store i2 1, i2* %118, align 1 + store i2 -1, i2* %120, align 1 + store i2 1, i2* %122, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %124 = bitcast i8* %123 to %Array** + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %126 = bitcast i8* %125 to %Array** + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 2) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 3) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 4) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 5) + %134 = bitcast i8* %133 to %Array** + %135 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 6) + %136 = bitcast i8* %135 to %Array** + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 7) + %138 = bitcast i8* %137 to %Array** + store %Array* %51, %Array** %124, align 8 + store %Array* %60, %Array** %126, align 8 + store %Array* %69, %Array** %128, align 8 + store %Array* %78, %Array** %130, align 8 + store %Array* %87, %Array** %132, align 8 + store %Array* %96, %Array** %134, align 8 + store 
%Array* %105, %Array** %136, align 8 + store %Array* %114, %Array** %138, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %139 = phi i64 [ 0, %continue__1 ], [ %144, %exiting__1 ] + %140 = icmp sle i64 %139, 7 + br i1 %140, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %141 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %139) + %142 = bitcast i8* %141 to %Array** + %143 = load %Array*, %Array** %142, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %143, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %144 = add i64 %139, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %145 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 0) + %147 = bitcast i8* %146 to i64* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 1) + %149 = bitcast i8* %148 to i64* + %150 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 2) + %151 = bitcast i8* %150 to i64* + %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 3) + %153 = bitcast i8* %152 to i64* + store i64 %p, i64* %147, align 4 + store i64 %q, i64* %149, align 4 + store i64 %r, i64* %151, align 4 + store i64 %s, i64* %153, align 4 + %154 = call { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %145) + %155 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 0 + %sortedIndices = load %Array*, %Array** %155, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %sortedIndices, i32 1) + %156 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 1 + %signs = load %Array*, %Array** %156, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 1) + %157 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 2 + %globalSign = load double, double* %157, align 8 + %158 = call %Array* @Microsoft__Quantum__Arrays___348fb71df7bc448eb164e2c392c91e93_Zipped__body(%Array* %ops, %Array* %signs) + %159 = call i64 @__quantum__rt__array_get_size_1d(%Array* %158) + %160 = sub i64 %159, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %161 = phi i64 [ 0, %exit__1 ], [ %175, %exiting__2 ] + %162 = icmp sle i64 %161, %160 + br i1 %162, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %163 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %158, i64 %161) + %164 = bitcast i8* %163 to { %Array*, double }** + %165 = load { %Array*, double }*, { %Array*, double }** %164, align 8 + %166 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %165, i32 0, i32 0 + %op = load %Array*, %Array** %166, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %167 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %165, i32 0, i32 1 + %sign = load double, double* %167, align 8 + %168 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %pauliString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %168, %Array* %sortedIndices, %Array* %op) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, 
i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + %169 = fmul double %globalSign, %sign + %theta = fmul double %169, %angle + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %170 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %171 = bitcast %Tuple* %170 to { %Array*, double, %Array* }* + %172 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %171, i32 0, i32 0 + %173 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %171, i32 0, i32 1 + %174 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %171, i32 0, i32 2 + store %Array* %pauliString, %Array** %172, align 8 + store double %theta, double* %173, align 8 + store %Array* %qubits, %Array** %174, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %171) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %170, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %175 = add i64 %161, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %176 = phi i64 [ 0, %exit__2 ], [ %181, %exiting__3 ] + %177 = icmp sle i64 %176, 7 + br i1 %177, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %178 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %176) + %179 = bitcast i8* %178 to %Array** + %180 = load %Array*, %Array** %179, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %180, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %181 = add 
i64 %176, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %sortedIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %182 = phi i64 [ 0, %exit__3 ], [ %187, %exiting__4 ] + %183 = icmp sle i64 %182, 7 + br i1 %183, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %184 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %182) + %185 = bitcast i8* %184 to %Array** + %186 = load %Array*, %Array** %185, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %186, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %187 = add i64 %182, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %145, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %sortedIndices, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %signs, i32 -1) + %188 = bitcast { %Array*, %Array*, double }* %154 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %188, i32 -1) + %189 = sub i64 %159, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %190 = phi i64 [ 0, %exit__4 ], [ %198, %exiting__5 ] + %191 = icmp sle i64 %190, %189 + br i1 %191, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %192 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %158, i64 %190) + %193 = bitcast i8* %192 to { %Array*, double }** + %194 = load { %Array*, double }*, { %Array*, double }** %193, align 8 + %195 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %194, i32 0, i32 0 + %196 = load %Array*, %Array** %195, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %196, i32 -1) + %197 = bitcast { %Array*, double }* %194 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %197, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %198 = add i64 %190, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %158, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___ApplyJordanWignerClusterOperatorPQRSTerm____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + 
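+ ; Editor's note: the __qsVarN__name__ locals in this ctladj specialization
+ ; are the Q# compiler's renamed copies of the same variables (idxTermType,
+ ; coeff, idxFermions, p..s, angle, ops, ...) that appear under their plain
+ ; names in the body and ctl specializations above.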
%__qsVar1__coeff__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %12 = bitcast i8* %11 to i64* + %__qsVar3__p__ = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %14 = bitcast i8* %13 to i64* + %__qsVar4__q__ = load i64, i64* %14, align 4 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 2) + %16 = bitcast i8* %15 to i64* + %__qsVar5__r__ = load i64, i64* %16, align 4 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %18 = bitcast i8* %17 to i64* + %__qsVar6__s__ = load i64, i64* %18, align 4 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %20 = bitcast i8* %19 to double* + %21 = load double, double* %20, align 8 + %22 = fmul double 1.250000e-01, %21 + %__qsVar7__angle__ = fmul double %22, %stepSize + %23 = icmp eq i64 %__qsVar3__p__, %__qsVar4__q__ + br i1 %23, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %entry + %24 = icmp eq i64 %__qsVar3__p__, %__qsVar5__r__ + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %entry + %25 = phi i1 [ %23, %entry ], [ %24, %condFalse__1 ] + br i1 %25, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %condContinue__1 + %26 = icmp eq i64 %__qsVar3__p__, %__qsVar6__s__ + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condContinue__1 + %27 = phi i1 [ %25, %condContinue__1 ], [ %26, %condFalse__2 ] + br i1 %27, label %condContinue__3, label %condFalse__3 + +condFalse__3: ; preds = %condContinue__2 + %28 = icmp eq i64 %__qsVar4__q__, %__qsVar5__r__ + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condContinue__2 + %29 = phi i1 [ %27, %condContinue__2 ], [ %28, %condFalse__3 ] + br i1 %29, label %condContinue__4, label %condFalse__4 + +condFalse__4: ; preds = %condContinue__3 + %30 = icmp eq i64 %__qsVar4__q__, %__qsVar6__s__ + br label %condContinue__4 
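+ ; Editor's note: the condFalse/condContinue chain around this point is the
+ ; short-circuit expansion of (p == q) || (p == r) || (p == s) || (q == r)
+ ; || (q == s) || (r == s) over %__qsVar3__p__ .. %__qsVar6__s__; if any two
+ ; fermion indices coincide, control reaches then0__1, which concatenates
+ ; the indices into an error string and calls __quantum__rt__fail.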
+ +condContinue__4: ; preds = %condFalse__4, %condContinue__3 + %31 = phi i1 [ %29, %condContinue__3 ], [ %30, %condFalse__4 ] + br i1 %31, label %condContinue__5, label %condFalse__5 + +condFalse__5: ; preds = %condContinue__4 + %32 = icmp eq i64 %__qsVar5__r__, %__qsVar6__s__ + br label %condContinue__5 + +condContinue__5: ; preds = %condFalse__5, %condContinue__4 + %33 = phi i1 [ %31, %condContinue__4 ], [ %32, %condFalse__5 ] + br i1 %33, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__5 + %34 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @22, i32 0, i32 0)) + %35 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar3__p__) + %36 = call %String* @__quantum__rt__string_concatenate(%String* %34, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %38 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %37) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %37, i32 -1) + %39 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar4__q__) + %40 = call %String* @__quantum__rt__string_concatenate(%String* %38, %String* %39) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %42 = call %String* @__quantum__rt__string_concatenate(%String* %40, %String* %41) + call void @__quantum__rt__string_update_reference_count(%String* %40, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %41, i32 -1) + %43 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar5__r__) + %44 = call %String* @__quantum__rt__string_concatenate(%String* %42, %String* %43) + call void @__quantum__rt__string_update_reference_count(%String* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %43, i32 -1) + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %46 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + %47 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar6__s__) + %48 = call %String* @__quantum__rt__string_concatenate(%String* %46, %String* %47) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + %49 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %50 = call %String* @__quantum__rt__string_concatenate(%String* %48, %String* %49) + call void @__quantum__rt__string_update_reference_count(%String* %48, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %49, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 
-1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__fail(%String* %50) + unreachable + +continue__1: ; preds = %condContinue__5 + %51 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 0) + %53 = bitcast i8* %52 to i2* + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 1) + %55 = bitcast i8* %54 to i2* + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 2) + %57 = bitcast i8* %56 to i2* + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 3) + %59 = bitcast i8* %58 to i2* + store i2 -1, i2* %53, align 1 + store i2 -1, i2* %55, align 1 + store i2 1, i2* %57, align 1 + store i2 -1, i2* %59, align 1 + %60 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 0) + %62 = bitcast i8* %61 to i2* + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 1) + %64 = bitcast i8* %63 to i2* + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 2) + %66 = bitcast i8* %65 to i2* + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 3) + %68 = bitcast i8* %67 to i2* + store i2 1, i2* %62, align 1 + store i2 1, i2* %64, align 1 + store i2 1, i2* %66, align 1 + store i2 -1, i2* %68, align 1 + %69 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 0) + %71 = bitcast i8* %70 to i2* + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 1) + %73 = bitcast i8* %72 to i2* + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 2) + %75 = bitcast i8* %74 to i2* + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 3) + %77 = bitcast i8* %76 to i2* + store i2 1, i2* %71, align 1 + store i2 -1, i2* %73, align 1 + store i2 -1, i2* %75, align 1 + store i2 -1, i2* %77, align 1 + %78 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 0) + %80 = bitcast i8* %79 to i2* + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 1) + %82 = bitcast i8* %81 to i2* + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 2) + %84 = bitcast i8* %83 to i2* + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 3) + %86 = bitcast i8* %85 to i2* + store i2 -1, i2* %80, align 1 + store i2 1, i2* %82, align 1 + store i2 -1, i2* %84, align 1 + store i2 -1, i2* %86, align 1 + %87 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 0) + %89 = bitcast i8* %88 to i2* + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%87, i64 1) + %91 = bitcast i8* %90 to i2* + %92 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 2) + %93 = bitcast i8* %92 to i2* + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 3) + %95 = bitcast i8* %94 to i2* + store i2 1, i2* %89, align 1 + store i2 -1, i2* %91, align 1 + store i2 1, i2* %93, align 1 + store i2 1, i2* %95, align 1 + %96 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 0) + %98 = bitcast i8* %97 to i2* + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 1) + %100 = bitcast i8* %99 to i2* + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 2) + %102 = bitcast i8* %101 to i2* + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 3) + %104 = bitcast i8* %103 to i2* + store i2 -1, i2* %98, align 1 + store i2 1, i2* %100, align 1 + store i2 1, i2* %102, align 1 + store i2 1, i2* %104, align 1 + %105 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 0) + %107 = bitcast i8* %106 to i2* + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 1) + %109 = bitcast i8* %108 to i2* + %110 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 2) + %111 = bitcast i8* %110 to i2* + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 3) + %113 = bitcast i8* %112 to i2* + store i2 -1, i2* %107, align 1 + store i2 -1, i2* %109, align 1 + store i2 -1, i2* %111, align 1 + store i2 1, i2* %113, align 1 + %114 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 0) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 1) + %118 = bitcast i8* %117 to i2* + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 2) + %120 = bitcast i8* %119 to i2* + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 3) + %122 = bitcast i8* %121 to i2* + store i2 1, i2* %116, align 1 + store i2 1, i2* %118, align 1 + store i2 -1, i2* %120, align 1 + store i2 1, i2* %122, align 1 + %__qsVar10__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 0) + %124 = bitcast i8* %123 to %Array** + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 1) + %126 = bitcast i8* %125 to %Array** + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 2) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 3) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 4) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 5) + %134 = bitcast i8* %133 to %Array** + %135 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 6) + %136 = bitcast i8* %135 to %Array** + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 7) + %138 = bitcast i8* %137 to %Array** + store %Array* %51, %Array** %124, align 8 + store %Array* %60, %Array** %126, align 8 + store 
%Array* %69, %Array** %128, align 8 + store %Array* %78, %Array** %130, align 8 + store %Array* %87, %Array** %132, align 8 + store %Array* %96, %Array** %134, align 8 + store %Array* %105, %Array** %136, align 8 + store %Array* %114, %Array** %138, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %139 = phi i64 [ 0, %continue__1 ], [ %144, %exiting__1 ] + %140 = icmp sle i64 %139, 7 + br i1 %140, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %141 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %139) + %142 = bitcast i8* %141 to %Array** + %143 = load %Array*, %Array** %142, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %143, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %144 = add i64 %139, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__ops__, i32 1) + %145 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 0) + %147 = bitcast i8* %146 to i64* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 1) + %149 = bitcast i8* %148 to i64* + %150 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 2) + %151 = bitcast i8* %150 to i64* + %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 3) + %153 = bitcast i8* %152 to i64* + store i64 %__qsVar3__p__, i64* %147, align 4 + store i64 %__qsVar4__q__, i64* %149, align 4 + store i64 %__qsVar5__r__, i64* %151, align 4 + store i64 %__qsVar6__s__, i64* %153, align 4 + %154 = call { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %145) + %155 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 0 + %__qsVar11__sortedIndices__ = load %Array*, %Array** %155, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar11__sortedIndices__, i32 1) + %156 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 1 + %__qsVar12__signs__ = load %Array*, %Array** %156, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar12__signs__, i32 1) + %157 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 2 + %__qsVar13__globalSign__ = load double, double* %157, align 8 + %158 = call %Array* @Microsoft__Quantum__Arrays___348fb71df7bc448eb164e2c392c91e93_Zipped__body(%Array* %__qsVar10__ops__, %Array* %__qsVar12__signs__) + %159 = call %Array* @Microsoft__Quantum__Arrays___348fb71df7bc448eb164e2c392c91e93_Zipped__body(%Array* %__qsVar10__ops__, %Array* %__qsVar12__signs__) + %160 = call i64 @__quantum__rt__array_get_size_1d(%Array* %159) + %161 = sub i64 %160, 1 + %162 = insertvalue %Range zeroinitializer, i64 %161, 0 + %163 = insertvalue %Range %162, i64 -1, 1 + %164 = insertvalue %Range %163, i64 0, 2 + %165 = call %Array* @__quantum__rt__array_slice_1d(%Array* %158, %Range %164, i1 true) + %166 = call i64 @__quantum__rt__array_get_size_1d(%Array* %165) + %167 = sub i64 %166, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %168 = phi i64 [ 0, %exit__1 ], [ %182, %exiting__2 ] + %169 = icmp sle i64 %168, %167 + br i1 %169, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %170 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 %168) + %171 = bitcast i8* %170 to { %Array*, double }** + %172 = load { %Array*, double }*, { %Array*, double }** %171, align 8 + %173 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %172, i32 0, i32 0 + %__qsVar14__op__ = load %Array*, %Array** %173, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar14__op__, i32 1) + %174 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %172, i32 0, i32 1 + %__qsVar15__sign__ = load double, double* %174, align 8 + %175 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %__qsVar16__pauliString__ = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %175, %Array* %__qsVar11__sortedIndices__, %Array* %__qsVar14__op__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 1) + %176 = fmul double %__qsVar13__globalSign__, %__qsVar15__sign__ + %theta = fmul double %176, %__qsVar7__angle__ + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar16__pauliString__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %177 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %178 = bitcast %Tuple* %177 to { %Array*, double, %Array* }* + %179 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %178, i32 0, i32 0 + %180 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %178, i32 0, i32 1 + %181 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %178, i32 0, i32 2 + store %Array* %__qsVar16__pauliString__, %Array** %179, align 8 + store double %theta, double* %180, align 8 + store %Array* %qubits, %Array** %181, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %178) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %177, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar14__op__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar16__pauliString__, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %182 = add i64 %168, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, 
i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %183 = phi i64 [ 0, %exit__2 ], [ %188, %exiting__3 ] + %184 = icmp sle i64 %183, 7 + br i1 %184, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %185 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %183) + %186 = bitcast i8* %185 to %Array** + %187 = load %Array*, %Array** %186, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %187, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %188 = add i64 %183, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__ops__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar11__sortedIndices__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar12__signs__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %189 = phi i64 [ 0, %exit__3 ], [ %194, %exiting__4 ] + %190 = icmp sle i64 %189, 7 + br i1 %190, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %191 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %189) + %192 = bitcast i8* %191 to %Array** + %193 = load %Array*, %Array** %192, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %193, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %194 = add i64 %189, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %145, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar11__sortedIndices__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar12__signs__, i32 -1) + %195 = bitcast { %Array*, %Array*, double }* %154 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %195, i32 -1) + %196 = call i64 @__quantum__rt__array_get_size_1d(%Array* %158) + %197 = sub i64 %196, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %198 = phi i64 [ 0, %exit__4 ], [ %206, %exiting__5 ] + %199 = icmp sle i64 %198, %197 + br i1 %199, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %200 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %158, i64 %198) + %201 = bitcast i8* %200 to { %Array*, double }** + %202 = load { %Array*, double }*, { %Array*, double }** %201, align 8 + %203 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %202, i32 0, i32 0 + %204 = load %Array*, %Array** %203, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %204, i32 -1) + %205 = bitcast { %Array*, double }* %202 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %205, i32 -1) + br label %exiting__5 + 
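+ ; Editor's note: this loop and header__6 below release the element arrays
+ ; and tuples of the two identical Zipped results %158 and %159; %158 was
+ ; consumed through its reversed slice %165 (the adjoint applies the terms
+ ; in reverse order), while %159 only supplied the slice bounds.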
+exiting__5: ; preds = %body__5 + %206 = add i64 %198, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %158, i32 -1) + %207 = sub i64 %160, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %208 = phi i64 [ 0, %exit__5 ], [ %216, %exiting__6 ] + %209 = icmp sle i64 %208, %207 + br i1 %209, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %210 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %159, i64 %208) + %211 = bitcast i8* %210 to { %Array*, double }** + %212 = load { %Array*, double }*, { %Array*, double }** %211, align 8 + %213 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %212, i32 0, i32 0 + %214 = load %Array*, %Array** %213, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %214, i32 -1) + %215 = bitcast { %Array*, double }* %212 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %215, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %216 = add i64 %208, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %159, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %165, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___ApplyJordanWignerClusterOperatorPQTerm____body({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %coeff = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %8 = bitcast i8* %7 to i64* + %p = load i64, i64* %8, align 4 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %10 = bitcast i8* %9 to i64* + %q = load i64, i64* %10, align 4 + %11 = icmp eq i64 %p, %q + br i1 %11, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([44 x i8], [44 x i8]* @25, i32 0, i32 0)) + %13 = call %String* 
@__quantum__rt__int_to_string(i64 %p) + %14 = call %String* @__quantum__rt__string_concatenate(%String* %12, %String* %13) + call void @__quantum__rt__string_update_reference_count(%String* %12, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + %15 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %16 = call %String* @__quantum__rt__string_concatenate(%String* %14, %String* %15) + call void @__quantum__rt__string_update_reference_count(%String* %14, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + %17 = call %String* @__quantum__rt__int_to_string(i64 %q) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %20) + unreachable + +continue__1: ; preds = %entry + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %22 = bitcast i8* %21 to double* + %23 = load double, double* %22, align 8 + %24 = fmul double 5.000000e-01, %23 + %angle = fmul double %24, %stepSize + %25 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 0) + %27 = bitcast i8* %26 to i2* + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 1) + %29 = bitcast i8* %28 to i2* + store i2 1, i2* %27, align 1 + store i2 -1, i2* %29, align 1 + %30 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 0) + %32 = bitcast i8* %31 to i2* + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 1) + %34 = bitcast i8* %33 to i2* + store i2 -1, i2* %32, align 1 + store i2 1, i2* %34, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %36 = bitcast i8* %35 to %Array** + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %38 = bitcast i8* %37 to %Array** + store %Array* %25, %Array** %36, align 8 + store %Array* %30, %Array** %38, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %39 = phi i64 [ 0, %continue__1 ], [ %44, 
%exiting__1 ] + %40 = icmp sle i64 %39, 1 + br i1 %40, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %39) + %42 = bitcast i8* %41 to %Array** + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %44 = add i64 %39, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %signs = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %signs, i64 0) + %46 = bitcast i8* %45 to double* + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %signs, i64 1) + %48 = bitcast i8* %47 to double* + store double 1.000000e+00, double* %46, align 8 + store double -1.000000e+00, double* %48, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 1) + %49 = call %Array* @Microsoft__Quantum__Arrays___348fb71df7bc448eb164e2c392c91e93_Zipped__body(%Array* %ops, %Array* %signs) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %52 = phi i64 [ 0, %exit__1 ], [ %60, %exiting__2 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { %Array*, double }** + %56 = load { %Array*, double }*, { %Array*, double }** %55, align 8 + %57 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %56, i32 0, i32 0 + %op = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %58 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %56, i32 0, i32 1 + %sign = load double, double* %58, align 8 + %59 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %pauliString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %59, %Array* %idxFermions, %Array* %op) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + %theta = fmul double %sign, %angle + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %pauliString, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %60 = add i64 %52, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + 
call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %61 = phi i64 [ 0, %exit__2 ], [ %66, %exiting__3 ] + %62 = icmp sle i64 %61, 1 + br i1 %62, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %61) + %64 = bitcast i8* %63 to %Array** + %65 = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %66 = add i64 %61, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %67 = phi i64 [ 0, %exit__3 ], [ %72, %exiting__4 ] + %68 = icmp sle i64 %67, 1 + br i1 %68, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %67) + %70 = bitcast i8* %69 to %Array** + %71 = load %Array*, %Array** %70, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %71, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %72 = add i64 %67, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %signs, i32 -1) + %73 = sub i64 %50, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %74 = phi i64 [ 0, %exit__4 ], [ %82, %exiting__5 ] + %75 = icmp sle i64 %74, %73 + br i1 %75, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %74) + %77 = bitcast i8* %76 to { %Array*, double }** + %78 = load { %Array*, double }*, { %Array*, double }** %77, align 8 + %79 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %78, i32 0, i32 0 + %80 = load %Array*, %Array** %79, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %80, i32 -1) + %81 = bitcast { %Array*, double }* %78 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %81, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %82 = add i64 %74, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___ApplyJordanWignerClusterOperatorPQTerm____adj({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* 
}, { %Array*, %Array* }* %1, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %8 = bitcast i8* %7 to i64* + %__qsVar3__p__ = load i64, i64* %8, align 4 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %10 = bitcast i8* %9 to i64* + %__qsVar4__q__ = load i64, i64* %10, align 4 + %11 = icmp eq i64 %__qsVar3__p__, %__qsVar4__q__ + br i1 %11, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([44 x i8], [44 x i8]* @25, i32 0, i32 0)) + %13 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar3__p__) + %14 = call %String* @__quantum__rt__string_concatenate(%String* %12, %String* %13) + call void @__quantum__rt__string_update_reference_count(%String* %12, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + %15 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %16 = call %String* @__quantum__rt__string_concatenate(%String* %14, %String* %15) + call void @__quantum__rt__string_update_reference_count(%String* %14, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + %17 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar4__q__) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) 
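+; Annotation (descriptive comment, not generated output): p == q is rejected
+; for a PQ cluster term. The string assembled above embeds both offending
+; indices via __quantum__rt__int_to_string; the remaining alias counts are
+; released below, and __quantum__rt__fail then aborts this adjoint
+; specialization, so the block ends in unreachable.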
+ call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__fail(%String* %20) + unreachable + +continue__1: ; preds = %entry + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %22 = bitcast i8* %21 to double* + %23 = load double, double* %22, align 8 + %24 = fmul double 5.000000e-01, %23 + %__qsVar5__angle__ = fmul double %24, %stepSize + %25 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 0) + %27 = bitcast i8* %26 to i2* + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 1) + %29 = bitcast i8* %28 to i2* + store i2 1, i2* %27, align 1 + store i2 -1, i2* %29, align 1 + %30 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 0) + %32 = bitcast i8* %31 to i2* + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 1) + %34 = bitcast i8* %33 to i2* + store i2 -1, i2* %32, align 1 + store i2 1, i2* %34, align 1 + %__qsVar6__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 0) + %36 = bitcast i8* %35 to %Array** + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 1) + %38 = bitcast i8* %37 to %Array** + store %Array* %25, %Array** %36, align 8 + store %Array* %30, %Array** %38, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %39 = phi i64 [ 0, %continue__1 ], [ %44, %exiting__1 ] + %40 = icmp sle i64 %39, 1 + br i1 %40, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %39) + %42 = bitcast i8* %41 to %Array** + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %44 = add i64 %39, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 1) + %__qsVar7__signs__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar7__signs__, i64 0) + %46 = bitcast i8* %45 to double* + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar7__signs__, i64 1) + %48 = bitcast i8* %47 to double* + store double 1.000000e+00, double* %46, align 8 + store double -1.000000e+00, double* %48, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__signs__, i32 1) + %49 = call %Array* @Microsoft__Quantum__Arrays___348fb71df7bc448eb164e2c392c91e93_Zipped__body(%Array* %__qsVar6__ops__, %Array* %__qsVar7__signs__) + %50 = call %Array* @Microsoft__Quantum__Arrays___348fb71df7bc448eb164e2c392c91e93_Zipped__body(%Array* %__qsVar6__ops__, %Array* %__qsVar7__signs__) + %51 = call i64 @__quantum__rt__array_get_size_1d(%Array* %50) + %52 = sub i64 %51, 1 + %53 = insertvalue %Range zeroinitializer, i64 %52, 0 + %54 = insertvalue %Range %53, i64 -1, 1 + %55 = insertvalue %Range %54, i64 0, 2 + %56 = call %Array* 
@__quantum__rt__array_slice_1d(%Array* %49, %Range %55, i1 true) + %57 = call i64 @__quantum__rt__array_get_size_1d(%Array* %56) + %58 = sub i64 %57, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %59 = phi i64 [ 0, %exit__1 ], [ %67, %exiting__2 ] + %60 = icmp sle i64 %59, %58 + br i1 %60, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 %59) + %62 = bitcast i8* %61 to { %Array*, double }** + %63 = load { %Array*, double }*, { %Array*, double }** %62, align 8 + %64 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %63, i32 0, i32 0 + %__qsVar8__op__ = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 1) + %65 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %63, i32 0, i32 1 + %__qsVar9__sign__ = load double, double* %65, align 8 + %66 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %__qsVar10__pauliString__ = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %66, %Array* %__qsVar2__idxFermions__, %Array* %__qsVar8__op__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 1) + %theta = fmul double %__qsVar9__sign__, %__qsVar5__angle__ + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %__qsVar10__pauliString__, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__pauliString__, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %67 = add i64 %59, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %68 = phi i64 [ 0, %exit__2 ], [ %73, %exiting__3 ] + %69 = icmp sle i64 %68, 1 + br i1 %69, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %68) + %71 = bitcast i8* %70 to %Array** + %72 = load %Array*, %Array** %71, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = 
%body__3 + %73 = add i64 %68, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__signs__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %74 = phi i64 [ 0, %exit__3 ], [ %79, %exiting__4 ] + %75 = icmp sle i64 %74, 1 + br i1 %75, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %74) + %77 = bitcast i8* %76 to %Array** + %78 = load %Array*, %Array** %77, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %78, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %79 = add i64 %74, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar7__signs__, i32 -1) + %80 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %81 = sub i64 %80, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %82 = phi i64 [ 0, %exit__4 ], [ %90, %exiting__5 ] + %83 = icmp sle i64 %82, %81 + br i1 %83, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %82) + %85 = bitcast i8* %84 to { %Array*, double }** + %86 = load { %Array*, double }*, { %Array*, double }** %85, align 8 + %87 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %86, i32 0, i32 0 + %88 = load %Array*, %Array** %87, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %88, i32 -1) + %89 = bitcast { %Array*, double }* %86 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %89, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %90 = add i64 %82, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + %91 = sub i64 %51, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %92 = phi i64 [ 0, %exit__5 ], [ %100, %exiting__6 ] + %93 = icmp sle i64 %92, %91 + br i1 %93, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 %92) + %95 = bitcast i8* %94 to { %Array*, double }** + %96 = load { %Array*, double }*, { %Array*, double }** %95, align 8 + %97 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %96, i32 0, i32 0 + %98 = load %Array*, %Array** %97, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %98, i32 -1) + %99 = bitcast { %Array*, double }* %96 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %99, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %100 = add i64 %92, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %50, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %56, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___ApplyJordanWignerClusterOperatorPQTerm____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = 
getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %coeff = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %12 = bitcast i8* %11 to i64* + %p = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %14 = bitcast i8* %13 to i64* + %q = load i64, i64* %14, align 4 + %15 = icmp eq i64 %p, %q + br i1 %15, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %16 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([44 x i8], [44 x i8]* @25, i32 0, i32 0)) + %17 = call %String* @__quantum__rt__int_to_string(i64 %p) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + %21 = call %String* @__quantum__rt__int_to_string(i64 %q) + %22 = call %String* @__quantum__rt__string_concatenate(%String* %20, %String* %21) + call void 
@__quantum__rt__string_update_reference_count(%String* %20, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %21, i32 -1) + %23 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %24 = call %String* @__quantum__rt__string_concatenate(%String* %22, %String* %23) + call void @__quantum__rt__string_update_reference_count(%String* %22, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %23, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %24) + unreachable + +continue__1: ; preds = %entry + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %26 = bitcast i8* %25 to double* + %27 = load double, double* %26, align 8 + %28 = fmul double 5.000000e-01, %27 + %angle = fmul double %28, %stepSize + %29 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %29, i64 0) + %31 = bitcast i8* %30 to i2* + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %29, i64 1) + %33 = bitcast i8* %32 to i2* + store i2 1, i2* %31, align 1 + store i2 -1, i2* %33, align 1 + %34 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 0) + %36 = bitcast i8* %35 to i2* + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 1) + %38 = bitcast i8* %37 to i2* + store i2 -1, i2* %36, align 1 + store i2 1, i2* %38, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %40 = bitcast i8* %39 to %Array** + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %42 = bitcast i8* %41 to %Array** + store %Array* %29, %Array** %40, align 8 + store %Array* %34, %Array** %42, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %43 = phi i64 [ 0, %continue__1 ], [ %48, %exiting__1 ] + %44 = icmp sle i64 %43, 1 + br i1 %44, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %43) + %46 = bitcast i8* %45 to %Array** + %47 = load %Array*, %Array** %46, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %48 = add i64 %43, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %signs = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %signs, i64 0) + 
%50 = bitcast i8* %49 to double* + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %signs, i64 1) + %52 = bitcast i8* %51 to double* + store double 1.000000e+00, double* %50, align 8 + store double -1.000000e+00, double* %52, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 1) + %53 = call %Array* @Microsoft__Quantum__Arrays___348fb71df7bc448eb164e2c392c91e93_Zipped__body(%Array* %ops, %Array* %signs) + %54 = call i64 @__quantum__rt__array_get_size_1d(%Array* %53) + %55 = sub i64 %54, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %56 = phi i64 [ 0, %exit__1 ], [ %69, %exiting__2 ] + %57 = icmp sle i64 %56, %55 + br i1 %57, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 %56) + %59 = bitcast i8* %58 to { %Array*, double }** + %60 = load { %Array*, double }*, { %Array*, double }** %59, align 8 + %61 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %60, i32 0, i32 0 + %op = load %Array*, %Array** %61, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %62 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %60, i32 0, i32 1 + %sign = load double, double* %62, align 8 + %63 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %pauliString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %63, %Array* %idxFermions, %Array* %op) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + %theta = fmul double %sign, %angle + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %64 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %65 = bitcast %Tuple* %64 to { %Array*, double, %Array* }* + %66 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %65, i32 0, i32 0 + %67 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %65, i32 0, i32 1 + %68 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %65, i32 0, i32 2 + store %Array* %pauliString, %Array** %66, align 8 + store double %theta, double* %67, align 8 + store %Array* %qubits, %Array** %68, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %65) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %64, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call 
void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %69 = add i64 %56, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %70 = phi i64 [ 0, %exit__2 ], [ %75, %exiting__3 ] + %71 = icmp sle i64 %70, 1 + br i1 %71, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %70) + %73 = bitcast i8* %72 to %Array** + %74 = load %Array*, %Array** %73, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %74, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %75 = add i64 %70, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %76 = phi i64 [ 0, %exit__3 ], [ %81, %exiting__4 ] + %77 = icmp sle i64 %76, 1 + br i1 %77, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %76) + %79 = bitcast i8* %78 to %Array** + %80 = load %Array*, %Array** %79, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %80, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %81 = add i64 %76, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %signs, i32 -1) + %82 = sub i64 %54, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %83 = phi i64 [ 0, %exit__4 ], [ %91, %exiting__5 ] + %84 = icmp sle i64 %83, %82 + br i1 %84, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 %83) + %86 = bitcast i8* %85 to { %Array*, double }** + %87 = load { %Array*, double }*, { %Array*, double }** %86, align 8 + %88 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %87, i32 0, i32 0 + %89 = load %Array*, %Array** %88, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %89, i32 -1) + %90 = bitcast { %Array*, double }* %87 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %90, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %91 = add i64 %83, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1) + ret void +} + +define 
internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___ApplyJordanWignerClusterOperatorPQTerm____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %12 = bitcast i8* %11 to i64* + %__qsVar3__p__ = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %14 = bitcast i8* %13 to i64* + %__qsVar4__q__ = load i64, i64* %14, align 4 + %15 = icmp eq i64 %__qsVar3__p__, %__qsVar4__q__ + br i1 %15, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %16 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([44 x i8], [44 x i8]* @25, i32 0, i32 0)) + %17 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar3__p__) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__string_create(i8* 
getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + %21 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar4__q__) + %22 = call %String* @__quantum__rt__string_concatenate(%String* %20, %String* %21) + call void @__quantum__rt__string_update_reference_count(%String* %20, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %21, i32 -1) + %23 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %24 = call %String* @__quantum__rt__string_concatenate(%String* %22, %String* %23) + call void @__quantum__rt__string_update_reference_count(%String* %22, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %23, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__fail(%String* %24) + unreachable + +continue__1: ; preds = %entry + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %26 = bitcast i8* %25 to double* + %27 = load double, double* %26, align 8 + %28 = fmul double 5.000000e-01, %27 + %__qsVar5__angle__ = fmul double %28, %stepSize + %29 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %29, i64 0) + %31 = bitcast i8* %30 to i2* + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %29, i64 1) + %33 = bitcast i8* %32 to i2* + store i2 1, i2* %31, align 1 + store i2 -1, i2* %33, align 1 + %34 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 0) + %36 = bitcast i8* %35 to i2* + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 1) + %38 = bitcast i8* %37 to i2* + store i2 -1, i2* %36, align 1 + store i2 1, i2* %38, align 1 + %__qsVar6__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 0) + %40 = bitcast i8* %39 to %Array** + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 1) + %42 = bitcast i8* %41 to %Array** + store %Array* %29, %Array** %40, align 8 + store %Array* %34, %Array** %42, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %43 = phi i64 [ 0, %continue__1 ], [ %48, %exiting__1 ] + %44 = icmp sle i64 %43, 1 + br i1 %44, label %body__1, label %exit__1 + +body__1: ; preds 
= %header__1 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %43) + %46 = bitcast i8* %45 to %Array** + %47 = load %Array*, %Array** %46, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %48 = add i64 %43, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 1) + %__qsVar7__signs__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar7__signs__, i64 0) + %50 = bitcast i8* %49 to double* + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar7__signs__, i64 1) + %52 = bitcast i8* %51 to double* + store double 1.000000e+00, double* %50, align 8 + store double -1.000000e+00, double* %52, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__signs__, i32 1) + %53 = call %Array* @Microsoft__Quantum__Arrays___348fb71df7bc448eb164e2c392c91e93_Zipped__body(%Array* %__qsVar6__ops__, %Array* %__qsVar7__signs__) + %54 = call %Array* @Microsoft__Quantum__Arrays___348fb71df7bc448eb164e2c392c91e93_Zipped__body(%Array* %__qsVar6__ops__, %Array* %__qsVar7__signs__) + %55 = call i64 @__quantum__rt__array_get_size_1d(%Array* %54) + %56 = sub i64 %55, 1 + %57 = insertvalue %Range zeroinitializer, i64 %56, 0 + %58 = insertvalue %Range %57, i64 -1, 1 + %59 = insertvalue %Range %58, i64 0, 2 + %60 = call %Array* @__quantum__rt__array_slice_1d(%Array* %53, %Range %59, i1 true) + %61 = call i64 @__quantum__rt__array_get_size_1d(%Array* %60) + %62 = sub i64 %61, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %63 = phi i64 [ 0, %exit__1 ], [ %76, %exiting__2 ] + %64 = icmp sle i64 %63, %62 + br i1 %64, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 %63) + %66 = bitcast i8* %65 to { %Array*, double }** + %67 = load { %Array*, double }*, { %Array*, double }** %66, align 8 + %68 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %67, i32 0, i32 0 + %__qsVar8__op__ = load %Array*, %Array** %68, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 1) + %69 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %67, i32 0, i32 1 + %__qsVar9__sign__ = load double, double* %69, align 8 + %70 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %__qsVar10__pauliString__ = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %70, %Array* %__qsVar2__idxFermions__, %Array* %__qsVar8__op__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 1) + %theta = fmul double %__qsVar9__sign__, %__qsVar5__angle__ + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__pauliString__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %71 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) 
to i64)) + %72 = bitcast %Tuple* %71 to { %Array*, double, %Array* }* + %73 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %72, i32 0, i32 0 + %74 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %72, i32 0, i32 1 + %75 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %72, i32 0, i32 2 + store %Array* %__qsVar10__pauliString__, %Array** %73, align 8 + store double %theta, double* %74, align 8 + store %Array* %qubits, %Array** %75, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %72) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %71, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__pauliString__, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %76 = add i64 %63, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %77 = phi i64 [ 0, %exit__2 ], [ %82, %exiting__3 ] + %78 = icmp sle i64 %77, 1 + br i1 %78, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %77) + %80 = bitcast i8* %79 to %Array** + %81 = load %Array*, %Array** %80, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %82 = add i64 %77, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__signs__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %83 = phi i64 [ 0, %exit__3 ], [ %88, %exiting__4 ] + %84 = icmp sle i64 %83, 1 + br i1 %84, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%__qsVar6__ops__, i64 %83) + %86 = bitcast i8* %85 to %Array** + %87 = load %Array*, %Array** %86, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %87, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %88 = add i64 %83, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar7__signs__, i32 -1) + %89 = call i64 @__quantum__rt__array_get_size_1d(%Array* %53) + %90 = sub i64 %89, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %91 = phi i64 [ 0, %exit__4 ], [ %99, %exiting__5 ] + %92 = icmp sle i64 %91, %90 + br i1 %92, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 %91) + %94 = bitcast i8* %93 to { %Array*, double }** + %95 = load { %Array*, double }*, { %Array*, double }** %94, align 8 + %96 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %95, i32 0, i32 0 + %97 = load %Array*, %Array** %96, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %97, i32 -1) + %98 = bitcast { %Array*, double }* %95 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %98, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %99 = add i64 %91, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1) + %100 = sub i64 %55, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %101 = phi i64 [ 0, %exit__5 ], [ %109, %exiting__6 ] + %102 = icmp sle i64 %101, %100 + br i1 %102, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %54, i64 %101) + %104 = bitcast i8* %103 to { %Array*, double }** + %105 = load { %Array*, double }*, { %Array*, double }** %104, align 8 + %106 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %105, i32 0, i32 0 + %107 = load %Array*, %Array** %106, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %107, i32 -1) + %108 = bitcast { %Array*, double }* %105 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %108, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %109 = add i64 %101, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %54, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + ret void +} + +define internal { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorFunction____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array*, %Array* }* %1 to 
%Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %9 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorImpl____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %13 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %12, i32 0, i32 1 + store %Callable* %10, %Callable** %13, align 8 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %14, align 8 + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__36__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__21__FunctionTable, %Tuple* %11) + %16 = call { %Callable* }* @Microsoft__Quantum__Simulation__EvolutionUnitary__body(%Callable* %15) + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret { %Callable* }* %16 +} + +define internal void @Lifted__PartialApplication__36__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %2 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %4 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 0 + %5 = load double, double* %4, align 8 + %6 = getelementptr inbounds { double, %Array* }, { double, %Array* }* 
%3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %2, { { %Array*, %Array* }*, %Array* }** %10, align 8 + store double %5, double* %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__36__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %2 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %4 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 0 + %5 = load double, double* %4, align 8 + %6 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %2, { { %Array*, %Array* }*, %Array* }** %10, align 8 + store double %5, double* %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* 
@__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__36__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 1 + %7 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %6, align 8 + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %9 = load double, double* %8, align 8 + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %7, { { %Array*, %Array* }*, %Array* }** %14, align 8 + store double %9, double* %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* getelementptr ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* 
}* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__36__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 1 + %7 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %6, align 8 + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %9 = load double, double* %8, align 8 + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %7, { { %Array*, %Array* }*, %Array* }** %14, align 8 + store double %9, double* %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* getelementptr ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, 
%Array* }*, double, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %23) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorImpl____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorImpl____body({ { %Array*, %Array* }*, %Array* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorImpl____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, 
%Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorImpl____adj({ { %Array*, %Array* }*, %Array* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorImpl____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Array*, %Array* }*, %Array* }*, double, %Array* }*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorImpl____ctl(%Array* %3, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorImpl____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Array*, %Array* }*, %Array* }*, double, %Array* }*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorImpl____ctladj(%Array* %3, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__21__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %3, align 8 + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 0 + %6 = 
load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 %count-change) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 %count-change) + %11 = bitcast { %Array*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 %count-change) + %12 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 %count-change) + %14 = bitcast { { %Array*, %Array* }*, %Array* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__21__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %3, align 8 + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 0 + %6 = load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 %count-change) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 %count-change) + %11 = bitcast { %Array*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 %count-change) + %12 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 %count-change) + %14 = bitcast { { %Array*, %Array* }*, %Array* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorImpl____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, 
%Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %idxDoubles = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxTermType, i64 0) + %8 = bitcast i8* %7 to i64* + %termType = load i64, i64* %8, align 4 + %9 = icmp eq i64 %termType, 0 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___ApplyJordanWignerClusterOperatorPQTerm____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +test1__1: ; preds = %entry + %10 = icmp eq i64 %termType, 2 + br i1 %10, label %then1__1, label %continue__1 + +then1__1: ; preds = %test1__1 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___ApplyJordanWignerClusterOperatorPQRSTerm____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +continue__1: ; preds = %then1__1, %test1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorImpl____adj({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = 
getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %__qsVar1__idxDoubles__ = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__idxTermType__, i64 0) + %8 = bitcast i8* %7 to i64* + %__qsVar3__termType__ = load i64, i64* %8, align 4 + %9 = icmp eq i64 %__qsVar3__termType__, 0 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___ApplyJordanWignerClusterOperatorPQTerm____adj({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +test1__1: ; preds = %entry + %10 = icmp eq i64 %__qsVar3__termType__, 2 + br i1 %10, label %then1__1, label %continue__1 + +then1__1: ; preds = %test1__1 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___ApplyJordanWignerClusterOperatorPQRSTerm____adj({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +continue__1: ; preds = %then1__1, %test1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorImpl____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, 
%Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %idxDoubles = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxTermType, i64 0) + %12 = bitcast i8* %11 to i64* + %termType = load i64, i64* %12, align 4 + %13 = icmp eq i64 %termType, 0 + br i1 %13, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, 
%Array* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %16, align 8 + store double %stepSize, double* %17, align 8 + store %Array* %qubits, %Array** %18, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___ApplyJordanWignerClusterOperatorPQTerm____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %19 = icmp eq i64 %termType, 2 + br i1 %19, label %then1__1, label %continue__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %22 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %22, align 8 + store double %stepSize, double* %23, align 8 + store %Array* %qubits, %Array** %24, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___ApplyJordanWignerClusterOperatorPQRSTerm____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call 
void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then1__1, %test1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorImpl____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__idxDoubles__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__idxTermType__, i64 0) + %12 = bitcast i8* %11 to i64* + %__qsVar3__termType__ = load i64, i64* %12, align 4 + %13 = icmp eq i64 %__qsVar3__termType__, 0 + br i1 %13, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %16, align 8 + store double %stepSize, double* %17, align 8 + store %Array* %qubits, %Array** %18, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___ApplyJordanWignerClusterOperatorPQTerm____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %19 = icmp eq i64 %__qsVar3__termType__, 2 + br i1 %19, label %then1__1, label %continue__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %22 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %22, align 8 + store double %stepSize, double* %23, align 8 + store %Array* %qubits, %Array** %24, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___ApplyJordanWignerClusterOperatorPQRSTerm____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then1__1, %test1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___PrepareSingleConfigurationalStateSingleSiteOccupation____body(%Array* %qubitIndices) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitIndices, i32 1) + %1 = call 
%Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Array* }* + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %2, i32 0, i32 1 + store %Callable* %0, %Callable** %3, align 8 + store %Array* %qubitIndices, %Array** %4, align 8 + %5 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__37__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__22__FunctionTable, %Tuple* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + ret %Callable* %5 +} + +define internal void @Lifted__PartialApplication__37__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__37__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void 
@__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__37__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__37__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, 
%Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__adj(%Array* %3, %Array* %4) + ret void +} + +define internal void 
@Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctl(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctladj(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__22__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__22__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body(%Array* %qubitIndices, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + call void 
+define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body(%Array* %qubitIndices, %Array* %qubits) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  %1 = call %Array* @Microsoft__Quantum__Arrays___00d673fd4b4c4c47a9ef358c9e077e10_Subarray__body(%Array* %qubitIndices, %Array* %qubits)
+  call void @Microsoft__Quantum__Canon___9b30dd7f42984964b38a96887fa617c5_ApplyToEachCA__body(%Callable* %0, %Array* %1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__adj(%Array* %qubitIndices, %Array* %qubits) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  %1 = call %Array* @Microsoft__Quantum__Arrays___00d673fd4b4c4c47a9ef358c9e077e10_Subarray__body(%Array* %qubitIndices, %Array* %qubits)
+  call void @Microsoft__Quantum__Canon___9b30dd7f42984964b38a96887fa617c5_ApplyToEachCA__adj(%Callable* %0, %Array* %1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctl(%Array* %__controlQubits__, { %Array*, %Array* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0
+  %qubitIndices = load %Array*, %Array** %1, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1)
+  %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1
+  %qubits = load %Array*, %Array** %2, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  %4 = call %Array* @Microsoft__Quantum__Arrays___00d673fd4b4c4c47a9ef358c9e077e10_Subarray__body(%Array* %qubitIndices, %Array* %qubits)
+  %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64))
+  %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }*
+  %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0
+  %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1
+  store %Callable* %3, %Callable** %7, align 8
+  store %Array* %4, %Array** %8, align 8
+  call void @Microsoft__Quantum__Canon___9b30dd7f42984964b38a96887fa617c5_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %6)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctladj(%Array* %__controlQubits__, { %Array*, %Array* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0
+  %qubitIndices = load %Array*, %Array** %1, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1)
+  %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1
+  %qubits = load %Array*, %Array** %2, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  %4 = call %Array* @Microsoft__Quantum__Arrays___00d673fd4b4c4c47a9ef358c9e077e10_Subarray__body(%Array* %qubitIndices, %Array* %qubits)
+  %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64))
+  %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }*
+  %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0
+  %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1
+  store %Callable* %3, %Callable** %7, align 8
+  store %Array* %4, %Array** %8, align 8
+  call void @Microsoft__Quantum__Canon___9b30dd7f42984964b38a96887fa617c5_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Array* }* %6)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1)
+  ret void
+}
+
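+; __QsRef4__JordanWignerStateAsGeneratorIndex__ converts the %idx-th
+; (coefficient, fermion-indices) entry of %data into a GeneratorIndex: two
+; fermion indices map to term type 0, four to term type 2, and anything else
+; to a zero-coefficient term tagged -1.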
+define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4__JordanWignerStateAsGeneratorIndex____body(%Array* %data, i64 %idx) {
+entry:
+  %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %data)
+  %1 = sub i64 %0, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ]
+  %3 = icmp sle i64 %2, %1
+  br i1 %3, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %2)
+  %5 = bitcast i8* %4 to { { double, double }*, %Array* }**
+  %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8
+  %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0
+  %8 = load { double, double }*, { double, double }** %7, align 8
+  %9 = bitcast { double, double }* %8 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1)
+  %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1
+  %11 = load %Array*, %Array** %10, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1)
+  %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %13 = add i64 %2, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1)
+  %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %idx)
+  %15 = bitcast i8* %14 to { { double, double }*, %Array* }**
+  %16 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %15, align 8
+  %17 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %16, i32 0, i32 0
+  %18 = load { double, double }*, { double, double }** %17, align 8
+  %19 = getelementptr inbounds { double, double }, { double, double }* %18, i32 0, i32 0
+  %real = load double, double* %19, align 8
+  %20 = getelementptr inbounds { double, double }, { double, double }* %18, i32 0, i32 1
+  %imaginary = load double, double* %20, align 8
+  %21 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %16, i32 0, i32 1
+  %idxFermions = load %Array*, %Array** %21, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1)
+  %22 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions)
+  %23 = icmp eq i64 %22, 2
+  br i1 %23, label %then0__1, label %test1__1
+
+then0__1: ; preds = %exit__1
+  %24 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %24, i64 0)
+  %26 = bitcast i8* %25 to i64*
+  store i64 0, i64* %26, align 4
+  %27 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %27, i64 0)
+  %29 = bitcast i8* %28 to double*
+  store double %real, double* %29, align 8
+  %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64))
+  %31 = bitcast %Tuple* %30 to { %Array*, %Array* }*
+  %32 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %31, i32 0, i32 0
+  %33 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %31, i32 0, i32 1
+  store %Array* %24, %Array** %32, align 8
+  store %Array* %27, %Array** %33, align 8
+  %34 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %31, %Array* %idxFermions)
+  %35 = sub i64 %0, 1
+  br label %header__2
+
+test1__1: ; preds = %exit__1
+  %36 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions)
+  %37 = icmp eq i64 %36, 4
+  br i1 %37, label %then1__1, label %else__1
+
+then1__1: ; preds = %test1__1
+  %38 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 0)
+  %40 = bitcast i8* %39 to i64*
+  store i64 2, i64* %40, align 4
+  %41 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %41, i64 0)
+  %43 = bitcast i8* %42 to double*
+  store double %real, double* %43, align 8
+  %44 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64))
+  %45 = bitcast %Tuple* %44 to { %Array*, %Array* }*
+  %46 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %45, i32 0, i32 0
+  %47 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %45, i32 0, i32 1
+  store %Array* %38, %Array** %46, align 8
+  store %Array* %41, %Array** %47, align 8
+  %48 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %45, %Array* %idxFermions)
+  %49 = sub i64 %0, 1
+  br label %header__3
+
+else__1: ; preds = %test1__1
+  %50 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 0)
+  %52 = bitcast i8* %51 to i64*
+  store i64 -1, i64* %52, align 4
+  %53 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 0)
+  %55 = bitcast i8* %54 to double*
+  store double 0.000000e+00, double* %55, align 8
+  %56 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64))
+  %57 = bitcast %Tuple* %56 to { %Array*, %Array* }*
+  %58 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %57, i32 0, i32 0
+  %59 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %57, i32 0, i32 1
+  store %Array* %50, %Array** %58, align 8
+  store %Array* %53, %Array** %59, align 8
+  %60 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 0)
+  %62 = bitcast i8* %61 to i64*
+  store i64 0, i64* %62, align 4
+  %63 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %57, %Array* %60)
+  %64 = sub i64 %0, 1
+  br label %header__4
+
+continue__1: ; No predecessors!
+  unreachable
+
+header__2: ; preds = %exiting__2, %then0__1
+  %65 = phi i64 [ 0, %then0__1 ], [ %76, %exiting__2 ]
+  %66 = icmp sle i64 %65, %35
+  br i1 %66, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+  %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %65)
+  %68 = bitcast i8* %67 to { { double, double }*, %Array* }**
+  %69 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %68, align 8
+  %70 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %69, i32 0, i32 0
+  %71 = load { double, double }*, { double, double }** %70, align 8
+  %72 = bitcast { double, double }* %71 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %72, i32 -1)
+  %73 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %69, i32 0, i32 1
+  %74 = load %Array*, %Array** %73, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %74, i32 -1)
+  %75 = bitcast { { double, double }*, %Array* }* %69 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %75, i32 -1)
+  br label %exiting__2
+
+exiting__2: ; preds = %body__2
+  %76 = add i64 %65, 1
+  br label %header__2
+
+exit__2: ; preds = %header__2
+  call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1)
+  ret { { %Array*, %Array* }*, %Array* }* %34
+
+header__3: ; preds = %exiting__3, %then1__1
+  %77 = phi i64 [ 0, %then1__1 ], [ %88, %exiting__3 ]
+  %78 = icmp sle i64 %77, %49
+  br i1 %78, label %body__3, label %exit__3
+
+body__3: ; preds = %header__3
+  %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %77)
+  %80 = bitcast i8* %79 to { { double, double }*, %Array* }**
+  %81 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %80, align 8
+  %82 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %81, i32 0, i32 0
+  %83 = load { double, double }*, { double, double }** %82, align 8
+  %84 = bitcast { double, double }* %83 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %84, i32 -1)
+  %85 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %81, i32 0, i32 1
+  %86 = load %Array*, %Array** %85, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %86, i32 -1)
+  %87 = bitcast { { double, double }*, %Array* }* %81 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %87, i32 -1)
+  br label %exiting__3
+
+exiting__3: ; preds = %body__3
+  %88 = add i64 %77, 1
+  br label %header__3
+
+exit__3: ; preds = %header__3
+  call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %38, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %44, i32 -1)
+  ret { { %Array*, %Array* }*, %Array* }* %48
+
+header__4: ; preds = %exiting__4, %else__1
+  %89 = phi i64 [ 0, %else__1 ], [ %100, %exiting__4 ]
+  %90 = icmp sle i64 %89, %64
+  br i1 %90, label %body__4, label %exit__4
+
+body__4: ; preds = %header__4
+  %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %89)
+  %92 = bitcast i8* %91 to { { double, double }*, %Array* }**
+  %93 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %92, align 8
+  %94 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %93, i32 0, i32 0
+  %95 = load { double, double }*, { double, double }** %94, align 8
+  %96 = bitcast { double, double }* %95 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %96, i32 -1)
+  %97 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %93, i32 0, i32 1
+  %98 = load %Array*, %Array** %97, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %98, i32 -1)
+  %99 = bitcast { { double, double }*, %Array* }* %93 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %99, i32 -1)
+  br label %exiting__4
+
+exiting__4: ; preds = %body__4
+  %100 = add i64 %89, 1
+  br label %header__4
+
+exit__4: ; preds = %header__4
+  call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %50, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1)
+  ret { { %Array*, %Array* }*, %Array* }* %63
+}
+
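+; _ComputeJordanWignerBitString builds the length-%nFermions parity bit
+; string for a set of fermion indices: it fails on an odd number of indices
+; or an out-of-range index, XOR-flips the prefix up to each index, then
+; clears the bits at the indices themselves.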
+define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerBitString__body(i64 %nFermions, %Array* %idxFermions) {
+entry:
+  %zString = alloca %Array*, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1)
+  %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions)
+  %1 = srem i64 %0, 2
+  %2 = icmp ne i64 %1, 0
+  br i1 %2, label %then0__1, label %continue__1
+
+then0__1: ; preds = %entry
+  %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([86 x i8], [86 x i8]* @26, i32 0, i32 0))
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1)
+  call void @__quantum__rt__fail(%String* %3)
+  unreachable
+
+continue__1: ; preds = %entry
+  %4 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %nFermions)
+  %5 = sub i64 %nFermions, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %continue__1
+  %6 = phi i64 [ 0, %continue__1 ], [ %10, %exiting__1 ]
+  %7 = icmp sle i64 %6, %5
+  br i1 %7, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %6)
+  %9 = bitcast i8* %8 to i1*
+  store i1 false, i1* %9, align 1
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %10 = add i64 %6, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  store %Array* %4, %Array** %zString, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1)
+  %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions)
+  %12 = sub i64 %11, 1
+  br label %header__2
+
+header__2: ; preds = %exiting__2, %exit__1
+  %13 = phi i64 [ 0, %exit__1 ], [ %24, %exiting__2 ]
+  %14 = icmp sle i64 %13, %12
+  br i1 %14, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+  %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 %13)
+  %16 = bitcast i8* %15 to i64*
+  %fermionIdx = load i64, i64* %16, align 4
+  %17 = icmp sge i64 %fermionIdx, %nFermions
+  br i1 %17, label %then0__2, label %continue__2
+
+then0__2: ; preds = %body__2
+  %18 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @27, i32 0, i32 0))
+  %19 = call %String* @__quantum__rt__int_to_string(i64 %fermionIdx)
+  %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19)
+  call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1)
+  %21 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @28, i32 0, i32 0))
+  %22 = call %String* @__quantum__rt__string_concatenate(%String* %20, %String* %21)
+  call void @__quantum__rt__string_update_reference_count(%String* %20, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %21, i32 -1)
+  %23 = load %Array*, %Array** %zString, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1)
+  call void @__quantum__rt__fail(%String* %22)
+  unreachable
+
+continue__2: ; preds = %body__2
+  br label %header__3
+
+exiting__2: ; preds = %exit__3
+  %24 = add i64 %13, 1
+  br label %header__2
+
+exit__2: ; preds = %header__2
+  %25 = sub i64 %11, 1
+  br label %header__4
+
+header__3: ; preds = %exiting__3, %continue__2
+  %idx = phi i64 [ 0, %continue__2 ], [ %35, %exiting__3 ]
+  %26 = icmp sle i64 %idx, %fermionIdx
+  br i1 %26, label %body__3, label %exit__3
+
+body__3: ; preds = %header__3
+  %27 = load %Array*, %Array** %zString, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1)
+  %28 = call %Array* @__quantum__rt__array_copy(%Array* %27, i1 false)
+  %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %27, i64 %idx)
+  %30 = bitcast i8* %29 to i1*
+  %31 = load i1, i1* %30, align 1
+  %32 = xor i1 %31, true
+  %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 %idx)
+  %34 = bitcast i8* %33 to i1*
+  store i1 %32, i1* %34, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1)
+  store %Array* %28, %Array** %zString, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1)
+  br label %exiting__3
+
+exiting__3: ; preds = %body__3
+  %35 = add i64 %idx, 1
+  br label %header__3
+
+exit__3: ; preds = %header__3
+  br label %exiting__2
+
+header__4: ; preds = %exiting__4, %exit__2
+  %36 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__4 ]
+  %37 = icmp sle i64 %36, %25
+  br i1 %37, label %body__4, label %exit__4
+
+body__4: ; preds = %header__4
+  %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 %36)
+  %39 = bitcast i8* %38 to i64*
+  %fermionIdx__1 = load i64, i64* %39, align 4
+  %40 = load %Array*, %Array** %zString, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %40, i32 -1)
+  %41 = call %Array* @__quantum__rt__array_copy(%Array* %40, i1 false)
+  %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %41, i64 %fermionIdx__1)
+  %43 = bitcast i8* %42 to i1*
+  store i1 false, i1* %43, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1)
+  store %Array* %41, %Array** %zString, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %40, i32 -1)
+  br label %exiting__4
+
+exiting__4: ; preds = %body__4
+  %44 = add i64 %36, 1
+  br label %header__4
+
+exit__4: ; preds = %header__4
+  %45 = load %Array*, %Array** %zString, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %45, i32 -1)
+  ret %Array* %45
+}
+
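+; _ComputeJordanWignerPauliZString converts that bit string into a Pauli
+; array via BoolArrayAsPauli; the i2 -2 literal is the QIR encoding of
+; PauliZ.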
+define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliZString__body(i64 %nFermions, %Array* %idxFermions) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1)
+  %bitString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerBitString__body(i64 %nFermions, %Array* %idxFermions)
+  call void @__quantum__rt__array_update_alias_count(%Array* %bitString, i32 1)
+  %0 = call %Array* @Microsoft__Quantum__Convert__BoolArrayAsPauli__body(i2 -2, i1 true, %Array* %bitString)
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %bitString, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %bitString, i32 -1)
+  ret %Array* %0
+}
+
+define internal { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerClusterOperatorEvolutionSet__body() {
+entry:
+  %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorFunction____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  %1 = call { %Callable* }* @Microsoft__Quantum__Simulation__EvolutionSet__body(%Callable* %0)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1)
+  ret { %Callable* }* %1
+}
+
+define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorFunction____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { { %Array*, %Array* }*, %Array* }*
+  %1 = call { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___JordanWignerClusterOperatorFunction____body({ { %Array*, %Array* }*, %Array* }* %0)
+  %2 = bitcast %Tuple* %result-tuple to { { %Callable* }* }*
+  %3 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %2, i32 0, i32 0
+  store { %Callable* }* %1, { %Callable* }** %3, align 8
+  ret void
+}
+
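+; JordanWignerClusterOperatorGeneratorSystem wraps the term data in a
+; GeneratorSystem whose indexing function is a partial application of
+; __QsRef4__JordanWignerStateAsGeneratorIndex__ over %data.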
+define internal { i64, %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerClusterOperatorGeneratorSystem__body(%Array* %data) {
+entry:
+  %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %data)
+  %1 = sub i64 %0, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ]
+  %3 = icmp sle i64 %2, %1
+  br i1 %3, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %2)
+  %5 = bitcast i8* %4 to { { double, double }*, %Array* }**
+  %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8
+  %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0
+  %8 = load { double, double }*, { double, double }** %7, align 8
+  %9 = bitcast { double, double }* %8 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1)
+  %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1
+  %11 = load %Array*, %Array** %10, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1)
+  %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %13 = add i64 %2, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1)
+  %14 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4__JordanWignerStateAsGeneratorIndex____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  %15 = sub i64 %0, 1
+  br label %header__2
+
+header__2: ; preds = %exiting__2, %exit__1
+  %16 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ]
+  %17 = icmp sle i64 %16, %15
+  br i1 %17, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+  %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %16)
+  %19 = bitcast i8* %18 to { { double, double }*, %Array* }**
+  %20 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %19, align 8
+  %21 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %20, i32 0, i32 0
+  %22 = load { double, double }*, { double, double }** %21, align 8
+  %23 = bitcast { double, double }* %22 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 1)
+  %24 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %20, i32 0, i32 1
+  %25 = load %Array*, %Array** %24, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 1)
+  %26 = bitcast { { double, double }*, %Array* }* %20 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 1)
+  br label %exiting__2
+
+exiting__2: ; preds = %body__2
+  %27 = add i64 %16, 1
+  br label %header__2
+
+exit__2: ; preds = %header__2
+  call void @__quantum__rt__array_update_reference_count(%Array* %data, i32 1)
+  %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64))
+  %29 = bitcast %Tuple* %28 to { %Callable*, %Array* }*
+  %30 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %29, i32 0, i32 0
+  %31 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %29, i32 0, i32 1
+  store %Callable* %14, %Callable** %30, align 8
+  store %Array* %data, %Array** %31, align 8
+  %32 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__38__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__23__FunctionTable, %Tuple* %28)
+  %33 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 %0, %Callable* %32)
+  %34 = sub i64 %0, 1
+  br label %header__3
+
+header__3: ; preds = %exiting__3, %exit__2
+  %35 = phi i64 [ 0, %exit__2 ], [ %46, %exiting__3 ]
+  %36 = icmp sle i64 %35, %34
+  br i1 %36, label %body__3, label %exit__3
+
+body__3: ; preds = %header__3
+  %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %35)
+  %38 = bitcast i8* %37 to { { double, double }*, %Array* }**
+  %39 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %38, align 8
+  %40 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %39, i32 0, i32 0
+  %41 = load { double, double }*, { double, double }** %40, align 8
+  %42 = bitcast { double, double }* %41 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 -1)
+  %43 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %39, i32 0, i32 1
+  %44 = load %Array*, %Array** %43, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 -1)
+  %45 = bitcast { { double, double }*, %Array* }* %39 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 -1)
+  br label %exiting__3
+
+exiting__3: ; preds = %body__3
+  %46 = add i64 %35, 1
+  br label %header__3
+
+exit__3: ; preds = %header__3
+  call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %32, i32 -1)
+  ret { i64, %Callable* }* %33
+}
+
+define internal void @Lifted__PartialApplication__38__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }*
+  %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1
+  %2 = load %Array*, %Array** %1, align 8
+  %3 = bitcast %Tuple* %arg-tuple to { i64 }*
+  %4 = getelementptr inbounds { i64 }, { i64 }* %3, i32 0, i32 0
+  %5 = load i64, i64* %4, align 4
+  %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, i64 }* getelementptr ({ %Array*, i64 }, { %Array*, i64 }* null, i32 1) to i64))
+  %7 = bitcast %Tuple* %6 to { %Array*, i64 }*
+  %8 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %7, i32 0, i32 0
+  %9 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %7, i32 0, i32 1
+  store %Array* %2, %Array** %8, align 8
+  store i64 %5, i64* %9, align 4
+  %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0
+  %11 = load %Callable*, %Callable** %10, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4__JordanWignerStateAsGeneratorIndex____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { %Array*, i64 }*
+  %1 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %0, i32 0, i32 1
+  %3 = load %Array*, %Array** %1, align 8
+  %4 = load i64, i64* %2, align 4
+  %5 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4__JordanWignerStateAsGeneratorIndex____body(%Array* %3, i64 %4)
+  %6 = bitcast %Tuple* %result-tuple to { { { %Array*, %Array* }*, %Array* }* }*
+  %7 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %6, i32 0, i32 0
+  store { { %Array*, %Array* }*, %Array* }* %5, { { %Array*, %Array* }*, %Array* }** %7, align 8
+  ret void
+}
+
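+; The MemoryManagement__23 pair walks the captured array of (coefficient,
+; indices) tuples and applies the count delta element-wise as well as to the
+; array and the capture tuple themselves.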
+define internal void @MemoryManagement__23__RefCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }*
+  %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0
+  %2 = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change)
+  %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1
+  %4 = load %Array*, %Array** %3, align 8
+  %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4)
+  %6 = sub i64 %5, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %7 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ]
+  %8 = icmp sle i64 %7, %6
+  br i1 %8, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7)
+  %10 = bitcast i8* %9 to { { double, double }*, %Array* }**
+  %11 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %10, align 8
+  %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 0
+  %13 = load { double, double }*, { double, double }** %12, align 8
+  %14 = bitcast { double, double }* %13 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change)
+  %15 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 1
+  %16 = load %Array*, %Array** %15, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 %count-change)
+  %17 = bitcast { { double, double }*, %Array* }* %11 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 %count-change)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %18 = add i64 %7, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change)
+  ret void
+}
+
+define internal void @MemoryManagement__23__AliasCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }*
+  %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0
+  %2 = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change)
+  %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1
+  %4 = load %Array*, %Array** %3, align 8
+  %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4)
+  %6 = sub i64 %5, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %7 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ]
+  %8 = icmp sle i64 %7, %6
+  br i1 %8, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7)
+  %10 = bitcast i8* %9 to { { double, double }*, %Array* }**
+  %11 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %10, align 8
+  %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 0
+  %13 = load { double, double }*, { double, double }** %12, align 8
+  %14 = bitcast { double, double }* %13 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change)
+  %15 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 1
+  %16 = load %Array*, %Array** %15, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %16, i32 %count-change)
+  %17 = bitcast { { double, double }*, %Array* }* %11 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 %count-change)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %18 = add i64 %7, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change)
+  ret void
+}
+
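+; JordanWignerGeneratorSystem splits the packed Hamiltonian data into its Z,
+; ZZ, PQ/PQQR and h0123 term arrays, builds a GeneratorSystem for each via
+; HTermsToGenSys, and combines them with SumGeneratorSystems.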
+define internal { i64, %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerGeneratorSystem__body({ %Array*, %Array*, %Array*, %Array* }* %data) {
+entry:
+  %0 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 0
+  %ZData = load %Array*, %Array** %0, align 8
+  %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ZData)
+  %2 = sub i64 %1, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %3 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ]
+  %4 = icmp sle i64 %3, %2
+  br i1 %4, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %3)
+  %6 = bitcast i8* %5 to { %Array*, %Array* }**
+  %7 = load { %Array*, %Array* }*, { %Array*, %Array* }** %6, align 8
+  %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0
+  %9 = load %Array*, %Array** %8, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 1)
+  %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1
+  %11 = load %Array*, %Array** %10, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1)
+  %12 = bitcast { %Array*, %Array* }* %7 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %13 = add i64 %3, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 1)
+  %14 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 1
+  %ZZData = load %Array*, %Array** %14, align 8
+  %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ZZData)
+  %16 = sub i64 %15, 1
+  br label %header__2
+
+header__2: ; preds = %exiting__2, %exit__1
+  %17 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ]
+  %18 = icmp sle i64 %17, %16
+  br i1 %18, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+  %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %17)
+  %20 = bitcast i8* %19 to { %Array*, %Array* }**
+  %21 = load { %Array*, %Array* }*, { %Array*, %Array* }** %20, align 8
+  %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 0
+  %23 = load %Array*, %Array** %22, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1)
+  %24 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 1
+  %25 = load %Array*, %Array** %24, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 1)
+  %26 = bitcast { %Array*, %Array* }* %21 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 1)
+  br label %exiting__2
+
+exiting__2: ; preds = %body__2
+  %27 = add i64 %17, 1
+  br label %header__2
+
+exit__2: ; preds = %header__2
+  call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 1)
+  %28 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 2
+  %PQandPQQRData = load %Array*, %Array** %28, align 8
+  %29 = call i64 @__quantum__rt__array_get_size_1d(%Array* %PQandPQQRData)
+  %30 = sub i64 %29, 1
+  br label %header__3
+
+header__3: ; preds = %exiting__3, %exit__2
+  %31 = phi i64 [ 0, %exit__2 ], [ %41, %exiting__3 ]
+  %32 = icmp sle i64 %31, %30
+  br i1 %32, label %body__3, label %exit__3
+
+body__3: ; preds = %header__3
+  %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %31)
+  %34 = bitcast i8* %33 to { %Array*, %Array* }**
+  %35 = load { %Array*, %Array* }*, { %Array*, %Array* }** %34, align 8
+  %36 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %35, i32 0, i32 0
+  %37 = load %Array*, %Array** %36, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %37, i32 1)
+  %38 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %35, i32 0, i32 1
+  %39 = load %Array*, %Array** %38, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %39, i32 1)
+  %40 = bitcast { %Array*, %Array* }* %35 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1)
+  br label %exiting__3
+
+exiting__3: ; preds = %body__3
+  %41 = add i64 %31, 1
+  br label %header__3
+
+exit__3: ; preds = %header__3
+  call void @__quantum__rt__array_update_alias_count(%Array* %PQandPQQRData, i32 1)
+  %42 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 3
+  %h0123Data = load %Array*, %Array** %42, align 8
+  %43 = call i64 @__quantum__rt__array_get_size_1d(%Array* %h0123Data)
+  %44 = sub i64 %43, 1
+  br label %header__4
+
+header__4: ; preds = %exiting__4, %exit__3
+  %45 = phi i64 [ 0, %exit__3 ], [ %55, %exiting__4 ]
+  %46 = icmp sle i64 %45, %44
+  br i1 %46, label %body__4, label %exit__4
+
+body__4: ; preds = %header__4
+  %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %45)
+  %48 = bitcast i8* %47 to { %Array*, %Array* }**
+  %49 = load { %Array*, %Array* }*, { %Array*, %Array* }** %48, align 8
+  %50 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %49, i32 0, i32 0
+  %51 = load %Array*, %Array** %50, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %51, i32 1)
+  %52 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %49, i32 0, i32 1
+  %53 = load %Array*, %Array** %52, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %53, i32 1)
+  %54 = bitcast { %Array*, %Array* }* %49 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %54, i32 1)
+  br label %exiting__4
+
+exiting__4: ; preds = %body__4
+  %55 = add i64 %45, 1
+  br label %header__4
+
+exit__4: ; preds = %header__4
+  call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 1)
+  %56 = bitcast { %Array*, %Array*, %Array*, %Array* }* %data to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 1)
+  %57 = sub i64 %1, 1
+  br label %header__5
+
+header__5: ; preds = %exiting__5, %exit__4
+  %58 = phi i64 [ 0, %exit__4 ], [ %68, %exiting__5 ]
+  %59 = icmp sle i64 %58, %57
+  br i1 %59, label %body__5, label %exit__5
+
+body__5: ; preds = %header__5
+  %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %58)
+  %61 = bitcast i8* %60 to { %Array*, %Array* }**
+  %62 = load { %Array*, %Array* }*, { %Array*, %Array* }** %61, align 8
+  %63 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %62, i32 0, i32 0
+  %64 = load %Array*, %Array** %63, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1)
+  %65 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %62, i32 0, i32 1
+  %66 = load %Array*, %Array** %65, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 1)
+  %67 = bitcast { %Array*, %Array* }* %62 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %67, i32 1)
+  br label %exiting__5
+
+exiting__5: ; preds = %body__5
+  %68 = add i64 %58, 1
+  br label %header__5
+
+exit__5: ; preds = %header__5
+  call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 1)
+  %69 = sub i64 %15, 1
+  br label %header__6
+
+header__6: ; preds = %exiting__6, %exit__5
+  %70 = phi i64 [ 0, %exit__5 ], [ %80, %exiting__6 ]
+  %71 = icmp sle i64 %70, %69
+  br i1 %71, label %body__6, label %exit__6
+
+body__6: ; preds = %header__6
+  %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %70)
+  %73 = bitcast i8* %72 to { %Array*, %Array* }**
+  %74 = load { %Array*, %Array* }*, { %Array*, %Array* }** %73, align 8
+  %75 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %74, i32 0, i32 0
+  %76 = load %Array*, %Array** %75, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 1)
+  %77 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %74, i32 0, i32 1
+  %78 = load %Array*, %Array** %77, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %78, i32 1)
+  %79 = bitcast { %Array*, %Array* }* %74 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1)
+  br label %exiting__6
+
+exiting__6: ; preds = %body__6
+  %80 = add i64 %70, 1
+  br label %header__6
+
+exit__6: ; preds = %header__6
+  call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 1)
+  %81 = sub i64 %29, 1
+  br label %header__7
+
+header__7: ; preds = %exiting__7, %exit__6
+  %82 = phi i64 [ 0, %exit__6 ], [ %92, %exiting__7 ]
+  %83 = icmp sle i64 %82, %81
+  br i1 %83, label %body__7, label %exit__7
+
+body__7: ; preds = %header__7
+  %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %82)
+  %85 = bitcast i8* %84 to { %Array*, %Array* }**
+  %86 = load { %Array*, %Array* }*, { %Array*, %Array* }** %85, align 8
+  %87 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %86, i32 0, i32 0
+  %88 = load %Array*, %Array** %87, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %88, i32 1)
+  %89 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %86, i32 0, i32 1
+  %90 = load %Array*, %Array** %89, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %90, i32 1)
+  %91 = bitcast { %Array*, %Array* }* %86 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %91, i32 1)
+  br label %exiting__7
+
+exiting__7: ; preds = %body__7
+  %92 = add i64 %82, 1
+  br label %header__7
+
+exit__7: ; preds = %header__7
+  call void @__quantum__rt__array_update_alias_count(%Array* %PQandPQQRData, i32 1)
+  %93 = sub i64 %43, 1
+  br label %header__8
+
+header__8: ; preds = %exiting__8, %exit__7
+  %94 = phi i64 [ 0, %exit__7 ], [ %104, %exiting__8 ]
+  %95 = icmp sle i64 %94, %93
+  br i1 %95, label %body__8, label %exit__8
+
+body__8: ; preds = %header__8
+  %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %94)
+  %97 = bitcast i8* %96 to { %Array*, %Array* }**
+  %98 = load { %Array*, %Array* }*, { %Array*, %Array* }** %97, align 8
+  %99 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %98, i32 0, i32 0
+  %100 = load %Array*, %Array** %99, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %100, i32 1)
+  %101 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %98, i32 0, i32 1
+  %102 = load %Array*, %Array** %101, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %102, i32 1)
+  %103 = bitcast { %Array*, %Array* }* %98 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %103, i32 1)
+  br label %exiting__8
+
+exiting__8: ; preds = %body__8
+  %104 = add i64 %94, 1
+  br label %header__8
+
+exit__8: ; preds = %header__8
+  call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 1)
+  %105 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 0)
+  %107 = bitcast i8* %106 to i64*
+  store i64 0, i64* %107, align 4
+  %ZGenSys = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %ZData, %Array* %105)
+  %108 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %ZGenSys, i32 0, i32 1
+  %109 = load %Callable*, %Callable** %108, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %109, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %109, i32 1)
+  %110 = bitcast { i64, %Callable* }* %ZGenSys to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %110, i32 1)
+  %111 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %111, i64 0)
+  %113 = bitcast i8* %112 to i64*
+  store i64 1, i64* %113, align 4
+  %ZZGenSys = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %ZZData, %Array* %111)
+  %114 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %ZZGenSys, i32 0, i32 1
+  %115 = load %Callable*, %Callable** %114, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %115, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %115, i32 1)
+  %116 = bitcast { i64, %Callable* }* %ZZGenSys to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %116, i32 1)
+  %117 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %117, i64 0)
+  %119 = bitcast i8* %118 to i64*
+  store i64 2, i64* %119, align 4
+  %PQandPQQRGenSys = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %PQandPQQRData, %Array* %117)
+  %120 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %PQandPQQRGenSys, i32 0, i32 1
+  %121 = load %Callable*, %Callable** %120, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %121, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %121, i32 1)
+  %122 = bitcast { i64, %Callable* }* %PQandPQQRGenSys to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %122, i32 1)
+  %123 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %124 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %123, i64 0)
+  %125 = bitcast i8* %124 to i64*
+  store i64 3, i64* %125, align 4
+  %h0123GenSys = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %h0123Data, %Array* %123)
+  %126 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %h0123GenSys, i32 0, i32 1
+  %127 = load %Callable*, %Callable** %126, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %127, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %127, i32 1)
+  %128 = bitcast { i64, %Callable* }* %h0123GenSys to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %128, i32 1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %109, i32 1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %109, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %110, i32 1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %115, i32 1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %115, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %116, i32 1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %121, i32 1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %121, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %122, i32 1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %127, i32 1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %127, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %128, i32 1)
+  %129 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4)
+  %130 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 0)
+  %131 = bitcast i8* %130 to { i64, %Callable* }**
+  %132 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 1)
+  %133 = bitcast i8* %132 to { i64, %Callable* }**
+  %134 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 2)
+  %135 = bitcast i8* %134 to { i64, %Callable* }**
+  %136 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 3)
+  %137 = bitcast i8* %136 to { i64, %Callable* }**
+  store { i64, %Callable* }* %ZGenSys, { i64, %Callable* }** %131, align 8
+  store { i64, %Callable* }* %ZZGenSys, { i64, %Callable* }** %133, align 8
+  store { i64, %Callable* }* %PQandPQQRGenSys, { i64, %Callable* }** %135, align 8
+  store { i64, %Callable* }* %h0123GenSys, { i64, %Callable* }** %137, align 8
+  %138 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__SumGeneratorSystems__body(%Array* %129)
+  %139 = sub i64 %1, 1
+  br label %header__9
+
+header__9: ; preds = %exiting__9, %exit__8
+  %140 = phi i64 [ 0, %exit__8 ], [ %150, %exiting__9 ]
+  %141 = icmp sle i64 %140, %139
+  br i1 %141, label %body__9, label %exit__9
+
+body__9: ; preds = %header__9
+  %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %140)
+  %143 = bitcast i8* %142 to { %Array*, %Array* }**
+  %144 = load { %Array*, %Array* }*, { %Array*, %Array* }** %143, align 8
+  %145 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %144, i32 0, i32 0
+  %146 = load %Array*, %Array** %145, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %146, i32 -1)
+  %147 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %144, i32 0, i32 1
+  %148 = load %Array*, %Array** %147, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %148, i32 -1)
+  %149 = bitcast { %Array*, %Array* }* %144 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %149, i32 -1)
+  br label %exiting__9
+
+exiting__9: ; preds = %body__9
+  %150 = add i64 %140, 1
+  br label %header__9
+
+exit__9: ; preds = %header__9
+  call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 -1)
+  %151 = sub i64 %15, 1
+  br label %header__10
+
+header__10: ; preds = %exiting__10, %exit__9
+  %152 = phi i64 [ 0, %exit__9 ], [ %162, %exiting__10 ]
+  %153 = icmp sle i64 %152, %151
+  br i1 %153, label %body__10, label %exit__10
+
+body__10: ; preds = %header__10
+  %154 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %152)
+  %155 = bitcast i8* %154 to { %Array*, %Array* }**
+  %156 = load { %Array*, %Array* }*, { %Array*, %Array* }** %155, align 8
+  %157 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %156, i32 0, i32 0
+  %158 = load %Array*, %Array** %157, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %158, i32 -1)
+  %159 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %156, i32 0, i32 1
+  %160 = load %Array*, %Array** %159, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %160, i32 -1)
+  %161 = bitcast { %Array*, %Array* }* %156 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %161, i32 -1)
+  br label %exiting__10
+
+exiting__10: ; preds = %body__10
+  %162 = add i64 %152, 1
+  br label %header__10
+
+exit__10: ; preds = %header__10
+  call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 -1)
+  %163 = sub i64 %29, 1
+  br label %header__11
+
+header__11: ; preds = %exiting__11, %exit__10
+  %164 = phi i64 [ 0, %exit__10 ], [ %174, %exiting__11 ]
+  %165 = icmp sle i64 %164, %163
+  br i1 %165, label %body__11, label %exit__11
+
+body__11: ; preds = %header__11
+  %166 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %164)
+  %167 = bitcast i8* %166 to { %Array*, %Array* }**
+  %168 = load { %Array*, %Array* }*, { %Array*, %Array* }** %167, align 8
+  %169 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %168, i32 0, i32 0
+  %170 = load %Array*, %Array** %169, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %170, i32 -1)
+  %171 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %168, i32 0, i32 1
+  %172 = load %Array*, %Array** %171, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %172, i32 -1)
+  %173 = bitcast { %Array*, %Array* }* %168 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %173, i32 -1)
+  br label %exiting__11
+
+exiting__11: ; preds = %body__11
+  %174 = add i64 %164, 1
+  br label %header__11
+
+exit__11: ; preds = %header__11
+  call void @__quantum__rt__array_update_alias_count(%Array* %PQandPQQRData, i32 -1)
+  %175 = sub i64 %43, 1
+  br label %header__12
+
+header__12: ; preds = %exiting__12, %exit__11
+  %176 = phi i64 [ 0, %exit__11 ], [ %186, %exiting__12 ]
+  %177 = icmp sle i64 %176, %175
+  br i1 %177, label %body__12, label %exit__12
+
+body__12: ; preds = %header__12
+  %178 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %176)
+  %179 = bitcast i8* %178 to { %Array*, %Array* }**
+  %180 = load { %Array*, %Array* }*, { %Array*, %Array* }** %179, align 8
+  %181 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %180, i32 0, i32 0
+  %182 = load %Array*, %Array** %181, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %182, i32 -1)
+  %183 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %180, i32 0, i32 1
+  %184 = load %Array*, %Array** %183, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %184, i32 -1)
+  %185 = bitcast { %Array*, %Array* }* %180 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %185, i32 -1)
+  br label %exiting__12
+
+exiting__12: ; preds = %body__12
+  %186 = add i64 %176, 1
+  br label %header__12
+
+exit__12: ; preds = %header__12
+  call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 -1)
+  %187 = sub i64 %1, 1
+  br label %header__13
+
+header__13: ; preds = %exiting__13, %exit__12
+  %188 = phi i64 [ 0, %exit__12 ], [ %198, %exiting__13 ]
+  %189 = icmp sle i64 %188, %187
+  br i1 %189, label %body__13, label %exit__13
+
+body__13: ; preds = %header__13
+  %190 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %188)
+  %191 = bitcast i8* %190 to { %Array*, %Array* }**
+  %192 = load { %Array*, %Array* }*, { %Array*, %Array* }** %191, align 8
+  %193 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %192, i32 0, i32 0
+  %194 = load %Array*, %Array** %193, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %194, i32 -1)
+  %195 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %192, i32 0, i32 1
+  %196 = load %Array*, %Array** %195, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %196, i32 -1)
+  %197 = bitcast { %Array*, %Array* }* %192 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %197, i32 -1)
+  br label %exiting__13
+
+exiting__13: ; preds = %body__13
+  %198 = add i64 %188, 1
+  br label %header__13
+
+exit__13: ; preds = %header__13
+  call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 -1)
+  %199 = sub i64 %15, 1
+  br label %header__14
+
+header__14: ; preds = %exiting__14, %exit__13
+  %200 = phi i64 [ 0, %exit__13 ], [ %210, %exiting__14 ]
+  %201 = icmp sle i64 %200, %199
+  br i1 %201, label %body__14, label %exit__14
+
+body__14: ; preds = %header__14
+  %202 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %200)
+  %203 = bitcast i8* %202 to { %Array*, %Array* }**
+  %204 = load { %Array*, %Array* }*, { %Array*, %Array* }** %203, align 8
+  %205 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %204, i32 0, i32 0
+  %206 = load %Array*, %Array** %205, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %206, i32 -1)
+  %207 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %204, i32 0, i32 1
+  %208 = load %Array*, %Array** %207, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %208, i32 -1)
+  %209 = bitcast { %Array*, %Array* }* %204 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %209, i32 -1)
+  br label %exiting__14
+
+exiting__14: ; preds = %body__14
+  %210 = add i64 %200, 1
+  br label %header__14
+
+exit__14: ; preds = %header__14
+  call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 -1)
+  %211 = sub i64 %29, 1
+  br label %header__15
+
+header__15: ; preds = %exiting__15, %exit__14
+  %212 = phi i64 [ 0, %exit__14 ], [ %222, %exiting__15 ]
+  %213 = icmp sle i64 %212, %211
+  br i1 %213, label %body__15, label %exit__15
+
+body__15: ; preds = %header__15
+  %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %212)
+  %215 = bitcast i8* %214 to { %Array*, %Array* }**
+  %216 = load { %Array*, %Array* }*, { %Array*, %Array* }** %215, align 8
+  %217 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %216, i32 0, i32 0
+  %218 = load %Array*, %Array** %217, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %218, i32 -1)
+  %219 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %216, i32 0, i32 1
+  %220 = load %Array*, %Array** %219, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %220, i32 -1)
+  %221 = bitcast { %Array*, %Array* }* %216 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %221, i32 -1)
+  br label %exiting__15
+
+exiting__15: ; preds = %body__15
+  %222 = add i64 %212, 1
+  br label %header__15
+
+exit__15: ; preds = %header__15
+  call void @__quantum__rt__array_update_alias_count(%Array* %PQandPQQRData, i32 -1)
+  %223 = sub i64 %43, 1
+  br label %header__16
+
+header__16: ; preds = %exiting__16, %exit__15
+  %224 = phi i64 [ 0, %exit__15 ], [ %234, %exiting__16 ]
+  %225 = icmp sle i64 %224, %223
+  br i1 %225, label %body__16, label %exit__16
+
+body__16: ; preds = %header__16
+  %226 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %224)
+  %227 = bitcast i8* %226 to { %Array*, %Array* }**
+  %228 = load { %Array*, %Array* }*, { %Array*, %Array* }** %227, align 8
+  %229 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %228, i32 0, i32 0
+  %230 = load %Array*, %Array** %229, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %230, i32 -1)
+  %231 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %228, i32 0, i32 1
+  %232 = load %Array*, %Array** %231, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %232, i32 -1)
+  %233 = bitcast { %Array*, %Array* }* %228 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %233, i32 -1)
+  br label %exiting__16
+
+exiting__16: ; preds = %body__16
+  %234 = add i64 %224, 1
+  br label %header__16
+
+exit__16: ; preds = %header__16
+  call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %109, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %109, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %110, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %115, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %115, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %116, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %121, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %121, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %122, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %127, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %127, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %128, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %105, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %109, i32 -1)
+  call void
@__quantum__rt__callable_update_reference_count(%Callable* %109, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %110, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %111, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %115, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %115, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %116, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %117, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %121, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %121, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %122, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %123, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %127, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %127, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %128, i32 -1) + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %235 = phi i64 [ 0, %exit__16 ], [ %243, %exiting__17 ] + %236 = icmp sle i64 %235, 3 + br i1 %236, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %237 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 %235) + %238 = bitcast i8* %237 to { i64, %Callable* }** + %239 = load { i64, %Callable* }*, { i64, %Callable* }** %238, align 8 + %240 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %239, i32 0, i32 1 + %241 = load %Callable*, %Callable** %240, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %241, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %241, i32 -1) + %242 = bitcast { i64, %Callable* }* %239 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %242, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %243 = add i64 %235, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_reference_count(%Array* %129, i32 -1) + ret { i64, %Callable* }* %138 +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %data, %Array* %termType) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %data) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %2) + %5 = bitcast i8* %4 to { %Array*, %Array* }** + %6 = load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %2, 1 + br label 
%header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 1) + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__HTermsToGenIdx__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %14 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %25, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %15) + %18 = bitcast i8* %17 to { %Array*, %Array* }** + %19 = load { %Array*, %Array* }*, { %Array*, %Array* }** %18, align 8 + %20 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %19, i32 0, i32 0 + %21 = load %Array*, %Array** %20, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 1) + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %19, i32 0, i32 1 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 1) + %24 = bitcast { %Array*, %Array* }* %19 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %25 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %data, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %termType, i32 1) + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Array* }* getelementptr ({ %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Callable*, %Array*, %Array* }* + %28 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %27, i32 0, i32 1 + %30 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %27, i32 0, i32 2 + store %Callable* %13, %Callable** %28, align 8 + store %Array* %data, %Array** %29, align 8 + store %Array* %termType, %Array** %30, align 8 + %31 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__43__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__28__FunctionTable, %Tuple* %26) + %32 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 %0, %Callable* %31) + %33 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %34 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %35 = icmp sle i64 %34, %33 + br i1 %35, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %34) + %37 = bitcast i8* %36 to { %Array*, %Array* }** + %38 = load { %Array*, %Array* }*, { %Array*, %Array* }** %37, align 8 + %39 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %38, i32 0, i32 0 + %40 = load %Array*, %Array** %39, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %40, i32 -1) + %41 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %38, 
i32 0, i32 1 + %42 = load %Array*, %Array** %41, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 -1) + %43 = bitcast { %Array*, %Array* }* %38 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %34, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + ret { i64, %Callable* }* %32 +} + +define internal void @Microsoft__Quantum__Intrinsic__X__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSparseMultiConfigurationalState__body(%Callable* %initialStatePreparation, %Array* %excitations, %Array* %qubits) { +entry: + %success = alloca i1, align 1 + %applyFlips = alloca %Array*, align 8 + %coefficientsNewComplexPolar = alloca %Array*, align 8 + %coefficientsSqrtAbs = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %initialStatePreparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %initialStatePreparation, i32 1) + %nExcitations = call i64 @__quantum__rt__array_get_size_1d(%Array* %excitations) + %0 = sub i64 %nExcitations, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + 
+body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %excitations, i64 %1) + %4 = bitcast i8* %3 to { { double, double }*, %Array* }** + %5 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %4, align 8 + %6 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %5, i32 0, i32 0 + %7 = load { double, double }*, { double, double }** %6, align 8 + %8 = bitcast { double, double }* %7 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %5, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { { double, double }*, %Array* }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %excitations, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %13 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nExcitations) + %14 = sub i64 %nExcitations, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 %15) + %18 = bitcast i8* %17 to double* + store double 0.000000e+00, double* %18, align 8 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + store %Array* %13, %Array** %coefficientsSqrtAbs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %20 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double 0.000000e+00, double 0.000000e+00) + %21 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nExcitations) + %22 = sub i64 %nExcitations, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %23 = phi i64 [ 0, %exit__2 ], [ %28, %exiting__3 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %23) + %26 = bitcast i8* %25 to { double, double }** + store { double, double }* %20, { double, double }** %26, align 8 + %27 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %28 = add i64 %23, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %21, %Array** %coefficientsNewComplexPolar, align 8 + %29 = sub i64 %nExcitations, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %30 = phi i64 [ 0, %exit__3 ], [ %36, %exiting__4 ] + %31 = icmp sle i64 %30, %29 + br i1 %31, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %30) + %33 = bitcast i8* %32 to { double, double }** + %34 = load { double, double }*, { double, double }** %33, align 8 + %35 = bitcast { double, double }* %34 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %35, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %36 = add i64 %30, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 1) + %37 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %38 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nExcitations) + %39 = sub i64 %nExcitations, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %40 = phi i64 [ 0, %exit__4 ], [ %44, %exiting__5 ] + %41 = icmp sle i64 %40, %39 + br i1 %41, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 %40) + %43 = bitcast i8* %42 to %Array** + store %Array* %37, %Array** %43, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %44 = add i64 %40, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + store %Array* %38, %Array** %applyFlips, align 8 + %45 = sub i64 %nExcitations, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %46 = phi i64 [ 0, %exit__5 ], [ %51, %exiting__6 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 %46) + %49 = bitcast i8* %48 to %Array** + %50 = load %Array*, %Array** %49, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %51 = add i64 %46, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %38, i32 1) + %52 = sub i64 %nExcitations, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %idx = phi i64 [ 0, %exit__6 ], [ %94, %exiting__7 ] + %53 = icmp sle i64 %idx, %52 + br i1 %53, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %excitations, i64 %idx) + %55 = bitcast i8* %54 to { { double, double }*, %Array* }** + %56 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %55, align 8 + %57 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %56, i32 0, i32 0 + %x = load { double, double }*, { double, double }** %57, align 8 + %58 = bitcast { double, double }* %x to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %58, i32 1) + %59 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %56, i32 0, i32 1 + %excitation = load %Array*, %Array** %59, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %excitation, i32 1) + %60 = load %Array*, %Array** %coefficientsSqrtAbs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %60, i32 -1) + %61 = call %Array* @__quantum__rt__array_copy(%Array* %60, i1 false) + %62 = getelementptr inbounds { double, double }, { double, double }* %x, i32 0, i32 0 + %63 = getelementptr inbounds { double, double }, { double, double }* %x, i32 0, i32 1 + %64 = load double, double* %62, align 8 + %65 = load double, double* %63, align 8 + %66 = call { double, double }* @Microsoft__Quantum__Math__Complex__body(double %64, double %65) + %67 = call { double, double }* @Microsoft__Quantum__Math__ComplexAsComplexPolar__body({ double, double }* %66) + %d = 
call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %67) + %68 = call double @__quantum__qis__sqrt__body(double %d) + %69 = bitcast { double, double }* %66 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %69, i32 -1) + %70 = bitcast { double, double }* %67 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %70, i32 -1) + %71 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 %idx) + %72 = bitcast i8* %71 to double* + store double %68, double* %72, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %61, i32 1) + store %Array* %61, %Array** %coefficientsSqrtAbs, align 8 + %73 = load %Array*, %Array** %coefficientsNewComplexPolar, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %73, i32 -1) + %74 = call %Array* @__quantum__rt__array_copy(%Array* %73, i1 false) + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 %idx) + %76 = bitcast i8* %75 to double* + %77 = load double, double* %76, align 8 + %78 = call { double, double }* @Microsoft__Quantum__Math__Complex__body(double %64, double %65) + %79 = call { double, double }* @Microsoft__Quantum__Math__ComplexAsComplexPolar__body({ double, double }* %78) + %80 = call double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* %79) + %81 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %77, double %80) + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 %idx) + %83 = bitcast i8* %82 to { double, double }** + %84 = bitcast { double, double }* %81 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %84, i32 1) + %85 = load { double, double }*, { double, double }** %83, align 8 + %86 = bitcast { double, double }* %85 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %86, i32 -1) + store { double, double }* %81, { double, double }** %83, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %74, i32 1) + store %Array* %74, %Array** %coefficientsNewComplexPolar, align 8 + %87 = load %Array*, %Array** %applyFlips, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %87, i32 -1) + %88 = call %Array* @__quantum__rt__array_copy(%Array* %87, i1 false) + %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 %idx) + %90 = bitcast i8* %89 to %Array** + call void @__quantum__rt__array_update_alias_count(%Array* %excitation, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %excitation, i32 1) + %91 = load %Array*, %Array** %90, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %91, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %91, i32 -1) + store %Array* %excitation, %Array** %90, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %88, i32 1) + store %Array* %88, %Array** %applyFlips, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %58, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %excitation, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %73, i32 -1) + %92 = bitcast { double, double }* %78 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %92, i32 -1) + %93 = bitcast { double, double }* %79 to %Tuple* + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %93, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %87, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %94 = add i64 %idx, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + %95 = sitofp i64 %nExcitations to double + %96 = call double @Microsoft__Quantum__Math__Lg__body(double %95) + %nBitsIndices = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %96) + br label %repeat__1 + +repeat__1: ; preds = %fixup__1, %exit__7 + store i1 false, i1* %success, align 1 + %97 = add i64 %nBitsIndices, 1 + %auxillary = call %Array* @__quantum__rt__qubit_allocate_array(i64 %97) + call void @__quantum__rt__array_update_alias_count(%Array* %auxillary, i32 1) + %flag = call %Qubit* @__quantum__rt__qubit_allocate() + %98 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___PrepareSingleConfigurationalStateSingleSiteOccupation____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %99 = load %Array*, %Array** %applyFlips, align 8 + %100 = call %Array* @Microsoft__Quantum__Arrays___96177b3bdf29439bb7ea0c139e372afd_Mapped__body(%Callable* %98, %Array* %99) + %101 = call %Callable* @Microsoft__Quantum__Arrays___c1a84e3a65724cc49ff23eb15bf0b7bd_LookupFunction__body(%Array* %100) + %multiplexer = call %Callable* @Microsoft__Quantum__Canon__MultiplexerBruteForceFromGenerator__body(i64 %nExcitations, %Callable* %101) + call void @__quantum__rt__capture_update_alias_count(%Callable* %multiplexer, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %multiplexer, i32 1) + %102 = load %Array*, %Array** %coefficientsNewComplexPolar, align 8 + %103 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %auxillary) + call void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__body(%Array* %102, { %Array* }* %103) + %104 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %auxillary) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %105 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %106 = bitcast %Tuple* %105 to { { %Array* }*, %Array* }* + %107 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %106, i32 0, i32 0 + %108 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %106, i32 0, i32 1 + store { %Array* }* %104, { %Array* }** %107, align 8 + store %Array* %qubits, %Array** %108, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %multiplexer, %Tuple* %105, %Tuple* null) + %109 = load %Array*, %Array** %coefficientsSqrtAbs, align 8 + %110 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %auxillary) + call void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__adj(%Array* %109, { %Array* }* %110) + %111 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %112 = call %Callable* @Microsoft__Quantum__Canon___78aac080fc954ed4b9e248419df94bda_ControlledOnInt__body(i64 0, %Callable* %111) + call void @__quantum__rt__array_update_reference_count(%Array* %auxillary, i32 1) + %113 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %114 = bitcast %Tuple* %113 to { %Array*, %Qubit* }* + %115 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %114, i32 0, i32 0 + %116 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %114, i32 0, i32 1 + store %Array* %auxillary, %Array** %115, align 8 + store %Qubit* %flag, %Qubit** %116, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %112, %Tuple* %113, %Tuple* null) + %outcome = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %flag) + %117 = call %Result* @__quantum__rt__result_get_one() + %118 = call i1 @__quantum__rt__result_equal(%Result* %outcome, %Result* %117) + store i1 %118, i1* %success, align 1 + call void @Microsoft__Quantum__Intrinsic__ResetAll__body(%Array* %auxillary) + call void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %flag) + %119 = getelementptr inbounds { %Array* }, { %Array* }* %103, i32 0, i32 0 + %120 = load %Array*, %Array** %119, align 8 + %121 = getelementptr inbounds { %Array* }, { %Array* }* %104, i32 0, i32 0 + %122 = load %Array*, %Array** %121, align 8 + %123 = getelementptr inbounds { %Array* }, { %Array* }* %110, i32 0, i32 0 + %124 = load %Array*, %Array** %123, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %multiplexer, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %multiplexer, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %98, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %98, i32 -1) + %125 = call i64 @__quantum__rt__array_get_size_1d(%Array* %100) + %126 = sub i64 %125, 1 + br label %header__8 + +until__1: ; preds = %exit__8 + br i1 %118, label %rend__1, label %fixup__1 + +fixup__1: ; preds = %until__1 + call void @Microsoft__Quantum__Intrinsic__ResetAll__body(%Array* %qubits) + br label %repeat__1 + +rend__1: ; preds = %until__1 + %127 = load %Array*, %Array** %coefficientsSqrtAbs, align 8 + %128 = load %Array*, %Array** %coefficientsNewComplexPolar, align 8 + %129 = load %Array*, %Array** %applyFlips, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %initialStatePreparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %initialStatePreparation, i32 -1) + %130 = sub i64 %nExcitations, 1 + br label %header__9 + +header__8: ; preds = %exiting__8, %repeat__1 + %131 = phi i64 [ 0, %repeat__1 ], [ %136, %exiting__8 ] + %132 = icmp sle i64 %131, %126 + br i1 %132, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %100, i64 %131) + %134 = bitcast i8* %133 to %Callable** + %135 = load %Callable*, %Callable** %134, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %135, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %135, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %136 = add i64 %131, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %100, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %101, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %101, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %multiplexer, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %multiplexer, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + %137 = bitcast { %Array* }* %103 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %137, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %122, i32 -1) + %138 = bitcast { %Array* }* %104 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %138, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %105, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %124, i32 -1) + %139 = bitcast { %Array* }* %110 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %139, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %111, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %111, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %112, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %112, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxillary, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %113, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %outcome, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %flag) + call void @__quantum__rt__array_update_alias_count(%Array* %auxillary, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %auxillary) + br label %until__1 + +header__9: ; preds = %exiting__9, %rend__1 + %140 = phi i64 [ 0, %rend__1 ], [ %151, %exiting__9 ] + %141 = icmp sle i64 %140, %130 + br i1 %141, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %excitations, i64 %140) + %143 = bitcast i8* %142 to { { double, double }*, %Array* }** + %144 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %143, align 8 + %145 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %144, i32 0, i32 0 + %146 = load { double, double }*, { double, double }** %145, align 8 + %147 = bitcast { double, double }* %146 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %147, i32 -1) + %148 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %144, i32 0, i32 1 + %149 = load %Array*, %Array** %148, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %149, i32 -1) + %150 = bitcast { { double, double }*, %Array* }* %144 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %150, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %151 = add i64 %140, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %excitations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %127, i32 -1) + %152 = call i64 @__quantum__rt__array_get_size_1d(%Array* %128) + %153 = sub i64 %152, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %154 = phi i64 [ 0, %exit__9 ], [ %160, %exiting__10 ] + %155 = icmp sle i64 %154, %153 + br i1 %155, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %156 = call 
i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %128, i64 %154) + %157 = bitcast i8* %156 to { double, double }** + %158 = load { double, double }*, { double, double }** %157, align 8 + %159 = bitcast { double, double }* %158 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %159, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %160 = add i64 %154, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %128, i32 -1) + %161 = call i64 @__quantum__rt__array_get_size_1d(%Array* %129) + %162 = sub i64 %161, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %163 = phi i64 [ 0, %exit__10 ], [ %168, %exiting__11 ] + %164 = icmp sle i64 %163, %162 + br i1 %164, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %165 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 %163) + %166 = bitcast i8* %165 to %Array** + %167 = load %Array*, %Array** %166, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %167, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %168 = add i64 %163, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %129, i32 -1) + %169 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %169, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %127, i32 -1) + %170 = sub i64 %152, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %171 = phi i64 [ 0, %exit__11 ], [ %177, %exiting__12 ] + %172 = icmp sle i64 %171, %170 + br i1 %172, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %173 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %128, i64 %171) + %174 = bitcast i8* %173 to { double, double }** + %175 = load { double, double }*, { double, double }** %174, align 8 + %176 = bitcast { double, double }* %175 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %176, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %177 = add i64 %171, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_reference_count(%Array* %128, i32 -1) + %178 = sub i64 %161, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %179 = phi i64 [ 0, %exit__12 ], [ %184, %exiting__13 ] + %180 = icmp sle i64 %179, %178 + br i1 %180, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %181 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 %179) + %182 = bitcast i8* %181 to %Array** + %183 = load %Array*, %Array** %182, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %183, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %184 = add i64 %179, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_reference_count(%Array* %129, i32 -1) + ret void +} + +declare void @__quantum__rt__qubit_release(%Qubit*) + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___PrepareSingleConfigurationalStateSingleSiteOccupation____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array* }* + %1 = getelementptr 
inbounds { %Array* }, { %Array* }* %0, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Callable* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef4___PrepareSingleConfigurationalStateSingleSiteOccupation____body(%Array* %2) + %4 = bitcast %Tuple* %result-tuple to { %Callable* }* + %5 = getelementptr inbounds { %Callable* }, { %Callable* }* %4, i32 0, i32 0 + store %Callable* %3, %Callable** %5, align 8 + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body({ i64, %Array* }* %stateData, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %stateData, i32 0, i32 1 + %terms = load %Array*, %Array** %0, align 8 + %nTerms = call i64 @__quantum__rt__array_get_size_1d(%Array* %terms) + %1 = sub i64 %nTerms, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %2) + %5 = bitcast i8* %4 to { { double, double }*, %Array* }** + %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0 + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %terms, i32 1) + %14 = bitcast { i64, %Array* }* %stateData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %15 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %stateData, i32 0, i32 0 + %stateType = load i64, i64* %15, align 4 + %16 = sub i64 %nTerms, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %17 = phi i64 [ 0, %exit__1 ], [ %28, %exiting__2 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %17) + %20 = bitcast i8* %19 to { { double, double }*, %Array* }** + %21 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %20, align 8 + %22 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %21, i32 0, i32 0 + %23 = load { double, double }*, { double, double }** %22, align 8 + %24 = bitcast { double, double }* %23 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 1) + %25 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %21, i32 0, i32 1 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %27 = 
bitcast { { double, double }*, %Array* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %28 = add i64 %17, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %terms, i32 1) + %29 = icmp eq i64 %stateType, 2 + br i1 %29, label %then0__1, label %test1__1 + +then0__1: ; preds = %exit__2 + %30 = call i1 @Microsoft__Quantum__Arrays___7ba2bd7c451647258703e1788550e293_IsEmpty__body(%Array* %terms) + br i1 %30, label %then0__2, label %test1__2 + +then0__2: ; preds = %then0__1 + br label %continue__2 + +test1__2: ; preds = %then0__1 + %31 = icmp eq i64 %nTerms, 1 + br i1 %31, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__2 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 0) + %33 = bitcast i8* %32 to { { double, double }*, %Array* }** + %34 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %33, align 8 + %35 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %34, i32 0, i32 0 + %coefficient = load { double, double }*, { double, double }** %35, align 8 + %36 = bitcast { double, double }* %coefficient to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %36, i32 1) + %37 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %34, i32 0, i32 1 + %qubitIndices = load %Array*, %Array** %37, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body(%Array* %qubitIndices, %Array* %qubits) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %36, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + br label %continue__2 + +else__1: ; preds = %test1__2 + %38 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___5efc0405668d4c768edc4328b6eb53a1_NoOp__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSparseMultiConfigurationalState__body(%Callable* %38, %Array* %terms, %Array* %qubits) + call void @__quantum__rt__capture_update_reference_count(%Callable* %38, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %38, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %then1__1, %then0__2 + br label %continue__1 + +test1__1: ; preds = %exit__2 + %39 = icmp eq i64 %stateType, 3 + br i1 %39, label %then1__2, label %continue__1 + +then1__2: ; preds = %test1__1 + %40 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %41 = sub i64 %nTerms, 1 + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %41) + %43 = bitcast i8* %42 to { { double, double }*, %Array* }** + %44 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %43, align 8 + %45 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %44, i32 0, i32 0 + %46 = load { double, double }*, { double, double }** %45, align 8 + %47 = getelementptr inbounds { { double, double }*, %Array* }, { { double, 
double }*, %Array* }* %44, i32 0, i32 1 + %48 = load %Array*, %Array** %47, align 8 + %49 = bitcast { double, double }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %49, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %48, i32 1) + %50 = bitcast { { double, double }*, %Array* }* %44 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %50, i32 1) + %51 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 0) + %53 = bitcast i8* %52 to { { double, double }*, %Array* }** + store { { double, double }*, %Array* }* %44, { { double, double }*, %Array* }** %53, align 8 + %54 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Array* }* getelementptr ({ %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* null, i32 1) to i64)) + %55 = bitcast %Tuple* %54 to { %Callable*, i64, %Array* }* + %56 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %55, i32 0, i32 0 + %57 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %55, i32 0, i32 1 + %58 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %55, i32 0, i32 2 + store %Callable* %40, %Callable** %56, align 8 + store i64 2, i64* %57, align 4 + store %Array* %51, %Array** %58, align 8 + %referenceState = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__39__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__24__FunctionTable, %Tuple* %54) + call void @__quantum__rt__capture_update_alias_count(%Callable* %referenceState, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %referenceState, i32 1) + %59 = sub i64 %nTerms, 2 + %60 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %59, 2 + %61 = call %Array* @__quantum__rt__array_slice_1d(%Array* %terms, %Range %60, i1 true) + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareUnitaryCoupledClusterState__body(%Callable* %referenceState, %Array* %61, double 1.000000e+00, %Array* %qubits) + call void @__quantum__rt__capture_update_alias_count(%Callable* %referenceState, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %referenceState, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %referenceState, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %referenceState, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %61, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then1__2, %test1__1, %continue__2 + %62 = sub i64 %nTerms, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %continue__1 + %63 = phi i64 [ 0, %continue__1 ], [ %74, %exiting__3 ] + %64 = icmp sle i64 %63, %62 + br i1 %64, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %63) + %66 = bitcast i8* %65 to { { double, double }*, %Array* }** + %67 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %66, align 8 + %68 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %67, i32 0, i32 0 + %69 = load { double, double }*, { double, double }** %68, align 8 + %70 = bitcast { double, double }* %69 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %70, i32 -1) 
+ %71 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %67, i32 0, i32 1 + %72 = load %Array*, %Array** %71, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 -1) + %73 = bitcast { { double, double }*, %Array* }* %67 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %73, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %74 = add i64 %63, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %terms, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %75 = sub i64 %nTerms, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %76 = phi i64 [ 0, %exit__3 ], [ %87, %exiting__4 ] + %77 = icmp sle i64 %76, %75 + br i1 %77, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %76) + %79 = bitcast i8* %78 to { { double, double }*, %Array* }** + %80 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %79, align 8 + %81 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %80, i32 0, i32 0 + %82 = load { double, double }*, { double, double }** %81, align 8 + %83 = bitcast { double, double }* %82 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %83, i32 -1) + %84 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %80, i32 0, i32 1 + %85 = load %Array*, %Array** %84, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %85, i32 -1) + %86 = bitcast { { double, double }*, %Array* }* %80 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %87 = add i64 %76, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %terms, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___5efc0405668d4c768edc4328b6eb53a1_NoOp__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array* }* + %1 = getelementptr inbounds { %Array* }, { %Array* }* %0, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + call void @Microsoft__Quantum__Canon___5efc0405668d4c768edc4328b6eb53a1_NoOp__body(%Array* %2) + ret void +} + +define internal void @Microsoft__Quantum__Canon___5efc0405668d4c768edc4328b6eb53a1_NoOp__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array* }* + %1 = getelementptr inbounds { %Array* }, { %Array* }* %0, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + call void @Microsoft__Quantum__Canon___5efc0405668d4c768edc4328b6eb53a1_NoOp__adj(%Array* %2) + ret void +} + +define internal void @Microsoft__Quantum__Canon___5efc0405668d4c768edc4328b6eb53a1_NoOp__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, 
%Array** %2, align 8 + call void @Microsoft__Quantum__Canon___5efc0405668d4c768edc4328b6eb53a1_NoOp__ctl(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___5efc0405668d4c768edc4328b6eb53a1_NoOp__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Canon___5efc0405668d4c768edc4328b6eb53a1_NoOp__ctladj(%Array* %3, %Array* %4) + ret void +} + +define internal void @Lifted__PartialApplication__39__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Array* }* + %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Array* }* + %7 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %6, i32 0, i32 1 + store i64 %2, i64* %7, align 4 + store %Array* %4, %Array** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Array* }* + %10 = getelementptr inbounds { %Array* }, { %Array* }* %9, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Array* }*, %Array* }* getelementptr ({ { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { i64, %Array* }*, %Array* }* + %14 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %13, i32 0, i32 1 + store { i64, %Array* }* %6, { i64, %Array* }** %14, align 8 + store %Array* %11, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = load { i64, %Array* }*, { i64, %Array* }** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void 
@Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body({ i64, %Array* }* %3, %Array* %4) + ret void +} + +define internal void @MemoryManagement__24__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Array* }* + %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { { double, double }*, %Array* }** + %11 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 0 + %13 = load { double, double }*, { double, double }** %12, align 8 + %14 = bitcast { double, double }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change) + %15 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 1 + %16 = load %Array*, %Array** %15, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 %count-change) + %17 = bitcast { { double, double }*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__24__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Array* }* + %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { { double, double }*, %Array* }** + %11 = load { { double, double }*, %Array* }*, { { 
double, double }*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 0 + %13 = load { double, double }*, { double, double }** %12, align 8 + %14 = bitcast { double, double }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change) + %15 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 1 + %16 = load %Array*, %Array** %15, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %16, i32 %count-change) + %17 = bitcast { { double, double }*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareUnitaryCoupledClusterState__body(%Callable* %initialStatePreparation, %Array* %clusterOperator, double %trotterStepSize, %Array* %qubits) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %initialStatePreparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %initialStatePreparation, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %clusterOperator) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %clusterOperator, i64 %2) + %5 = bitcast i8* %4 to { { double, double }*, %Array* }** + %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0 + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %clusterOperator, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %clusterOperatorGeneratorSystem = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerClusterOperatorGeneratorSystem__body(%Array* %clusterOperator) + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %clusterOperatorGeneratorSystem, i32 0, i32 1 + %15 = load %Callable*, %Callable** %14, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %15, i32 1) + 
%16 = bitcast { i64, %Callable* }* %clusterOperatorGeneratorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + %17 = call { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerClusterOperatorEvolutionSet__body() + %evolutionGenerator = call { { %Callable* }*, { i64, %Callable* }* }* @Microsoft__Quantum__Simulation__EvolutionGenerator__body({ %Callable* }* %17, { i64, %Callable* }* %clusterOperatorGeneratorSystem) + %18 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %19 = load { %Callable* }*, { %Callable* }** %18, align 8 + %20 = getelementptr inbounds { %Callable* }, { %Callable* }* %19, i32 0, i32 0 + %21 = load %Callable*, %Callable** %20, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %21, i32 1) + %22 = bitcast { %Callable* }* %19 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 1) + %23 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %24 = load { i64, %Callable* }*, { i64, %Callable* }** %23, align 8 + %25 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 1 + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 1) + %27 = bitcast { i64, %Callable* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 1) + %28 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + %29 = call { %Callable* }* @Microsoft__Quantum__Simulation__TrotterSimulationAlgorithm__body(double %trotterStepSize, i64 1) + %30 = getelementptr inbounds { %Callable* }, { %Callable* }* %29, i32 0, i32 0 + %simulationAlgorithm = load %Callable*, %Callable** %30, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %simulationAlgorithm, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %simulationAlgorithm, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %simulationAlgorithm, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %simulationAlgorithm, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 1) + %31 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* getelementptr ({ %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* null, i32 1) to i64)) + %32 = bitcast %Tuple* %31 to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %33 = 
getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %32, i32 0, i32 0 + %34 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %32, i32 0, i32 1 + %35 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %32, i32 0, i32 2 + store %Callable* %simulationAlgorithm, %Callable** %33, align 8 + store double 1.000000e+00, double* %34, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, { { %Callable* }*, { i64, %Callable* }* }** %35, align 8 + %oracle = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__40__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__25__FunctionTable, %Tuple* %31) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { %Array* }* + %38 = getelementptr inbounds { %Array* }, { %Array* }* %37, i32 0, i32 0 + store %Array* %qubits, %Array** %38, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %initialStatePreparation, %Tuple* %36, %Tuple* null) + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Array* }* + %41 = getelementptr inbounds { %Array* }, { %Array* }* %40, i32 0, i32 0 + store %Array* %qubits, %Array** %41, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %oracle, %Tuple* %39, %Tuple* null) + %42 = getelementptr inbounds { %Callable* }, { %Callable* }* %17, i32 0, i32 0 + %43 = load %Callable*, %Callable** %42, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %initialStatePreparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %initialStatePreparation, i32 -1) + %44 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %45 = phi i64 [ 0, %exit__1 ], [ %56, %exiting__2 ] + %46 = icmp sle i64 %45, %44 + br i1 %46, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %clusterOperator, i64 %45) + %48 = bitcast i8* %47 to { { double, double }*, %Array* }** + %49 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %48, align 8 + %50 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %49, i32 0, i32 0 + %51 = load { double, double }*, { double, double }** %50, align 8 + %52 = bitcast { double, double }* %51 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %52, i32 -1) + %53 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %49, i32 0, i32 1 + %54 = load %Array*, %Array** %53, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %54, i32 -1) + %55 = bitcast { { double, double }*, %Array* }* %49 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %55, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = 
%body__2 + %56 = add i64 %45, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %clusterOperator, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %15, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %21, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %simulationAlgorithm, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %simulationAlgorithm, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %43, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %43, i32 -1) + %57 = bitcast { %Callable* }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %57, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %simulationAlgorithm, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %simulationAlgorithm, i32 -1) + %58 = bitcast { %Callable* }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %58, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %39, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__40__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* 
}*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 2 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 2 + store double %2, double* %10, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %4, { { %Callable* }*, { i64, %Callable* }* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__40__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 2 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, 
%Callable* }* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 2 + store double %2, double* %10, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %4, { { %Callable* }*, { i64, %Callable* }* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__40__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %6 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 2 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 2 + store double %7, double* %12, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %9, { { %Callable* }*, { 
i64, %Callable* }* }** %13, align 8 + store %Array* %4, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* getelementptr ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__40__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %6 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 2 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %12 = getelementptr 
inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 2 + store double %7, double* %12, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %9, { { %Callable* }*, { i64, %Callable* }* }** %13, align 8 + store %Array* %4, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* getelementptr ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @MemoryManagement__25__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 2 + 
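; editor's note (annotation added in review): this RefCount helper and the AliasCount twin below are the two entries of @MemoryManagement__25__FunctionTable, installed when the partial-application callable is created; a single runtime call therefore propagates the signed %count-change through the captured callable and every tuple reachable from the capture. + 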
%4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 %count-change) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 %count-change) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__25__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 2 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 %count-change) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = 
load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 %count-change) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef4___PrepareTrialState____body({ i64, %Array* }* %inputState, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputState, i32 0, i32 1 + %1 = load %Array*, %Array** %0, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { { double, double }*, %Array* }** + %8 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %7, align 8 + %9 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %8, i32 0, i32 0 + %10 = load { double, double }*, { double, double }** %9, align 8 + %11 = bitcast { double, double }* %10 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %8, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { { double, double }*, %Array* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %16 = bitcast { i64, %Array* }* %inputState to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertAllZero__body(%Array* %qubits) + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body({ i64, %Array* }* %inputState, %Array* %qubits) + %17 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %18 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %18) + %21 = bitcast i8* %20 to { { double, double }*, %Array* }** + %22 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %21, align 8 + %23 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %22, i32 0, i32 0 + %24 = load { double, double }*, { double, double }** %23, align 8 + %25 = bitcast { double, double }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* 
%25, i32 -1) + %26 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %22, i32 0, i32 1 + %27 = load %Array*, %Array** %26, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1) + %28 = bitcast { { double, double }*, %Array* }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %18, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef4___PrepareTrialState____adj({ i64, %Array* }* %inputState, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputState, i32 0, i32 1 + %1 = load %Array*, %Array** %0, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { { double, double }*, %Array* }** + %8 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %7, align 8 + %9 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %8, i32 0, i32 0 + %10 = load { double, double }*, { double, double }** %9, align 8 + %11 = bitcast { double, double }* %10 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %8, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { { double, double }*, %Array* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %16 = bitcast { i64, %Array* }* %inputState to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Intrinsic__ResetAll__body(%Array* %qubits) + %17 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %18 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %18) + %21 = bitcast i8* %20 to { { double, double }*, %Array* }** + %22 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %21, align 8 + %23 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %22, i32 0, i32 0 + %24 = load { double, double }*, { double, double }** %23, align 8 + %25 = bitcast { double, double }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, 
i32 -1) + %26 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %22, i32 0, i32 1 + %27 = load %Array*, %Array** %26, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1) + %28 = bitcast { { double, double }*, %Array* }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %18, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner__VQE__MeasurementOperators__body(i64 %nQubits, %Array* %indices, i64 %termType) { +entry: + %op__2 = alloca %Array*, align 8 + %compactOp__1 = alloca %Array*, align 8 + %op__1 = alloca %Array*, align 8 + %compactOp = alloca %Array*, align 8 + %op = alloca %Array*, align 8 + %ops = alloca %Array*, align 8 + %nOps = alloca i64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 1) + store i64 0, i64* %nOps, align 4 + %0 = icmp eq i64 %termType, 2 + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + store i64 2, i64* %nOps, align 4 + br label %continue__1 + +test1__1: ; preds = %entry + %1 = icmp eq i64 %termType, 3 + br i1 %1, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + store i64 8, i64* %nOps, align 4 + br label %continue__1 + +else__1: ; preds = %test1__1 + store i64 1, i64* %nOps, align 4 + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + %2 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 0) + %3 = load i64, i64* %nOps, align 4 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %3) + %5 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %6 = phi i64 [ 0, %continue__1 ], [ %10, %exiting__1 ] + %7 = icmp sle i64 %6, %5 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %6) + %9 = bitcast i8* %8 to %Array** + store %Array* %2, %Array** %9, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %2, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %6, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %4, %Array** %ops, align 8 + %11 = sub i64 %3, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %17, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %12) + %15 = bitcast i8* %14 to %Array** + %16 = load %Array*, %Array** %15, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %16, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %17 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %18 = icmp eq i64 %termType, 0 + br i1 %18, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %exit__2 + %19 = icmp eq i64 %termType, 1 + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %exit__2 + %20 
= phi i1 [ %18, %exit__2 ], [ %19, %condFalse__1 ] + br i1 %20, label %then0__2, label %test1__2 + +then0__2: ; preds = %condContinue__1 + %21 = call %Array* @Microsoft__Quantum__Arrays___b053fb32724e40cbbc4050de14397ee6_ConstantArray__body(i64 %nQubits, i2 0) + store %Array* %21, %Array** %op, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 1) + %22 = call i64 @__quantum__rt__array_get_size_1d(%Array* %indices) + %23 = sub i64 %22, 1 + br label %header__3 + +test1__2: ; preds = %condContinue__1 + %24 = icmp eq i64 %termType, 3 + br i1 %24, label %then1__2, label %test2__1 + +then1__2: ; preds = %test1__2 + %25 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 0) + %27 = bitcast i8* %26 to i2* + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 1) + %29 = bitcast i8* %28 to i2* + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 2) + %31 = bitcast i8* %30 to i2* + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 3) + %33 = bitcast i8* %32 to i2* + store i2 1, i2* %27, align 1 + store i2 1, i2* %29, align 1 + store i2 1, i2* %31, align 1 + store i2 1, i2* %33, align 1 + %34 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 0) + %36 = bitcast i8* %35 to i2* + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 1) + %38 = bitcast i8* %37 to i2* + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 2) + %40 = bitcast i8* %39 to i2* + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 3) + %42 = bitcast i8* %41 to i2* + store i2 -1, i2* %36, align 1 + store i2 -1, i2* %38, align 1 + store i2 -1, i2* %40, align 1 + store i2 -1, i2* %42, align 1 + %43 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 0) + %45 = bitcast i8* %44 to i2* + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 1) + %47 = bitcast i8* %46 to i2* + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 2) + %49 = bitcast i8* %48 to i2* + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 3) + %51 = bitcast i8* %50 to i2* + store i2 1, i2* %45, align 1 + store i2 1, i2* %47, align 1 + store i2 -1, i2* %49, align 1 + store i2 -1, i2* %51, align 1 + %52 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 0) + %54 = bitcast i8* %53 to i2* + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 1) + %56 = bitcast i8* %55 to i2* + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 2) + %58 = bitcast i8* %57 to i2* + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 3) + %60 = bitcast i8* %59 to i2* + store i2 -1, i2* %54, align 1 + store i2 -1, i2* %56, align 1 + store i2 1, i2* %58, align 1 + store i2 1, i2* %60, align 1 + %61 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 0) + %63 = bitcast i8* %62 to i2* + %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 1) + %65 = bitcast i8* %64 to i2* + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 2) + %67 = bitcast i8* %66 to i2* + 
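; editor's note (annotation added in review): %Pauli is i2-encoded (0 = PauliI, 1 = PauliX, -2 = PauliZ, -1 = PauliY), so the eight 4-element arrays assembled here spell out the XXXX, YYYY, XXYY, YYXX, XYXY, YXYX, YXXY, XYYX measurement bases used by the termType 3 (PQRS) branch. + 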
%68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 3) + %69 = bitcast i8* %68 to i2* + store i2 1, i2* %63, align 1 + store i2 -1, i2* %65, align 1 + store i2 1, i2* %67, align 1 + store i2 -1, i2* %69, align 1 + %70 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %71 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %70, i64 0) + %72 = bitcast i8* %71 to i2* + %73 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %70, i64 1) + %74 = bitcast i8* %73 to i2* + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %70, i64 2) + %76 = bitcast i8* %75 to i2* + %77 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %70, i64 3) + %78 = bitcast i8* %77 to i2* + store i2 -1, i2* %72, align 1 + store i2 1, i2* %74, align 1 + store i2 -1, i2* %76, align 1 + store i2 1, i2* %78, align 1 + %79 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %79, i64 0) + %81 = bitcast i8* %80 to i2* + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %79, i64 1) + %83 = bitcast i8* %82 to i2* + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %79, i64 2) + %85 = bitcast i8* %84 to i2* + %86 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %79, i64 3) + %87 = bitcast i8* %86 to i2* + store i2 -1, i2* %81, align 1 + store i2 1, i2* %83, align 1 + store i2 1, i2* %85, align 1 + store i2 -1, i2* %87, align 1 + %88 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 0) + %90 = bitcast i8* %89 to i2* + %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 1) + %92 = bitcast i8* %91 to i2* + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 2) + %94 = bitcast i8* %93 to i2* + %95 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 3) + %96 = bitcast i8* %95 to i2* + store i2 1, i2* %90, align 1 + store i2 -1, i2* %92, align 1 + store i2 -1, i2* %94, align 1 + store i2 1, i2* %96, align 1 + %compactOps = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 0) + %98 = bitcast i8* %97 to %Array** + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 1) + %100 = bitcast i8* %99 to %Array** + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 2) + %102 = bitcast i8* %101 to %Array** + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 3) + %104 = bitcast i8* %103 to %Array** + %105 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 4) + %106 = bitcast i8* %105 to %Array** + %107 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 5) + %108 = bitcast i8* %107 to %Array** + %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 6) + %110 = bitcast i8* %109 to %Array** + %111 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 7) + %112 = bitcast i8* %111 to %Array** + store %Array* %25, %Array** %98, align 8 + store %Array* %34, %Array** %100, align 8 + store %Array* %43, %Array** %102, align 8 + store %Array* %52, %Array** %104, align 8 + store %Array* %61, %Array** %106, align 8 + store %Array* %70, %Array** %108, align 8 + store %Array* %79, %Array** %110, align 8 + store %Array* %88, %Array** %112, align 8 + br label 
%header__4 + +test2__1: ; preds = %test1__2 + %113 = icmp eq i64 %termType, 2 + br i1 %113, label %then2__1, label %continue__2 + +then2__1: ; preds = %test2__1 + %114 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 0) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 1) + %118 = bitcast i8* %117 to i2* + store i2 1, i2* %116, align 1 + store i2 1, i2* %118, align 1 + %119 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %120 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %119, i64 0) + %121 = bitcast i8* %120 to i2* + %122 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %119, i64 1) + %123 = bitcast i8* %122 to i2* + store i2 -1, i2* %121, align 1 + store i2 -1, i2* %123, align 1 + %compactOps__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %124 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps__1, i64 0) + %125 = bitcast i8* %124 to %Array** + %126 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps__1, i64 1) + %127 = bitcast i8* %126 to %Array** + store %Array* %114, %Array** %125, align 8 + store %Array* %119, %Array** %127, align 8 + br label %header__12 + +continue__2: ; preds = %exit__16, %test2__1, %exit__11, %exit__3 + %128 = load %Array*, %Array** %ops, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + %129 = call i64 @__quantum__rt__array_get_size_1d(%Array* %128) + %130 = sub i64 %129, 1 + br label %header__17 + +header__3: ; preds = %exiting__3, %then0__2 + %131 = phi i64 [ 0, %then0__2 ], [ %139, %exiting__3 ] + %132 = icmp sle i64 %131, %23 + br i1 %132, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 %131) + %134 = bitcast i8* %133 to i64* + %idx = load i64, i64* %134, align 4 + %135 = load %Array*, %Array** %op, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %135, i32 -1) + %136 = call %Array* @__quantum__rt__array_copy(%Array* %135, i1 false) + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %136, i64 %idx) + %138 = bitcast i8* %137 to i2* + store i2 -2, i2* %138, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %136, i32 1) + store %Array* %136, %Array** %op, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %135, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %139 = add i64 %131, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + %140 = call %Array* @__quantum__rt__array_copy(%Array* %4, i1 false) + %141 = load %Array*, %Array** %op, align 8 + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %140, i64 0) + %143 = bitcast i8* %142 to %Array** + call void @__quantum__rt__array_update_alias_count(%Array* %141, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %141, i32 1) + %144 = load %Array*, %Array** %143, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %144, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %144, i32 -1) + store %Array* %141, %Array** %143, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %140, i32 1) + store %Array* %140, %Array** %ops, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %141, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %141, i32 -1) + br label %continue__2 + +header__4: ; preds = %exiting__4, %then1__2 + %145 = phi i64 [ 0, %then1__2 ], [ %150, %exiting__4 ] + %146 = icmp sle i64 %145, 7 + br i1 %146, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %147 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 %145) + %148 = bitcast i8* %147 to %Array** + %149 = load %Array*, %Array** %148, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %149, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %150 = add i64 %145, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %compactOps, i32 1) + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %iOp = phi i64 [ 0, %exit__4 ], [ %159, %exiting__5 ] + %151 = icmp sle i64 %iOp, 7 + br i1 %151, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 %iOp) + %153 = bitcast i8* %152 to %Array** + %154 = load %Array*, %Array** %153, align 8 + store %Array* %154, %Array** %compactOp, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %154, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %154, i32 1) + %155 = call %Array* @Microsoft__Quantum__Arrays___b053fb32724e40cbbc4050de14397ee6_ConstantArray__body(i64 %nQubits, i2 0) + store %Array* %155, %Array** %op__1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %155, i32 1) + %156 = call %Array* @Microsoft__Quantum__Arrays___8865ac95eaf34017a6ca070332d1d0d3_Zipped__body(%Array* %indices, %Array* %154) + %157 = call i64 @__quantum__rt__array_get_size_1d(%Array* %156) + %158 = sub i64 %157, 1 + br label %header__6 + +exiting__5: ; preds = %exit__9 + %159 = add i64 %iOp, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + br label %header__10 + +header__6: ; preds = %exiting__6, %body__5 + %160 = phi i64 [ 0, %body__5 ], [ %171, %exiting__6 ] + %161 = icmp sle i64 %160, %158 + br i1 %161, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %162 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %156, i64 %160) + %163 = bitcast i8* %162 to { i64, i2 }** + %164 = load { i64, i2 }*, { i64, i2 }** %163, align 8 + %165 = getelementptr inbounds { i64, i2 }, { i64, i2 }* %164, i32 0, i32 0 + %idx__1 = load i64, i64* %165, align 4 + %166 = getelementptr inbounds { i64, i2 }, { i64, i2 }* %164, i32 0, i32 1 + %pauli = load i2, i2* %166, align 1 + %167 = load %Array*, %Array** %op__1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %167, i32 -1) + %168 = call %Array* @__quantum__rt__array_copy(%Array* %167, i1 false) + %169 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %168, i64 %idx__1) + %170 = bitcast i8* %169 to i2* + store i2 %pauli, i2* %170, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %168, i32 1) + store %Array* %168, %Array** %op__1, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %167, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %171 = add i64 %160, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + %172 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 
0) + %173 = bitcast i8* %172 to i64* + %174 = load i64, i64* %173, align 4 + %175 = add i64 %174, 1 + %176 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 1) + %177 = bitcast i8* %176 to i64* + %178 = load i64, i64* %177, align 4 + %179 = sub i64 %178, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %i = phi i64 [ %175, %exit__6 ], [ %185, %exiting__7 ] + %180 = icmp sle i64 %i, %179 + br i1 %180, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %181 = load %Array*, %Array** %op__1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %181, i32 -1) + %182 = call %Array* @__quantum__rt__array_copy(%Array* %181, i1 false) + %183 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %182, i64 %i) + %184 = bitcast i8* %183 to i2* + store i2 -2, i2* %184, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %182, i32 1) + store %Array* %182, %Array** %op__1, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %181, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %185 = add i64 %i, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + %186 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 2) + %187 = bitcast i8* %186 to i64* + %188 = load i64, i64* %187, align 4 + %189 = add i64 %188, 1 + %190 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 3) + %191 = bitcast i8* %190 to i64* + %192 = load i64, i64* %191, align 4 + %193 = sub i64 %192, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %i__1 = phi i64 [ %189, %exit__7 ], [ %199, %exiting__8 ] + %194 = icmp sle i64 %i__1, %193 + br i1 %194, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %195 = load %Array*, %Array** %op__1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %195, i32 -1) + %196 = call %Array* @__quantum__rt__array_copy(%Array* %195, i1 false) + %197 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %196, i64 %i__1) + %198 = bitcast i8* %197 to i2* + store i2 -2, i2* %198, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %196, i32 1) + store %Array* %196, %Array** %op__1, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %195, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %199 = add i64 %i__1, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + %200 = load %Array*, %Array** %ops, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %200, i32 -1) + %201 = call %Array* @__quantum__rt__array_copy(%Array* %200, i1 false) + %202 = load %Array*, %Array** %op__1, align 8 + %203 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %201, i64 %iOp) + %204 = bitcast i8* %203 to %Array** + call void @__quantum__rt__array_update_alias_count(%Array* %202, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %202, i32 1) + %205 = load %Array*, %Array** %204, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %205, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %205, i32 -1) + store %Array* %202, %Array** %204, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %201, i32 1) + store %Array* %201, %Array** %ops, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %154, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %202, i32 -1) + %206 = sub i64 %157, 1 + br label %header__9 
+ +header__9: ; preds = %exiting__9, %exit__8 + %207 = phi i64 [ 0, %exit__8 ], [ %213, %exiting__9 ] + %208 = icmp sle i64 %207, %206 + br i1 %208, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %209 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %156, i64 %207) + %210 = bitcast i8* %209 to { i64, i2 }** + %211 = load { i64, i2 }*, { i64, i2 }** %210, align 8 + %212 = bitcast { i64, i2 }* %211 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %212, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %213 = add i64 %207, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_reference_count(%Array* %156, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %200, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %154, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %202, i32 -1) + br label %exiting__5 + +header__10: ; preds = %exiting__10, %exit__5 + %214 = phi i64 [ 0, %exit__5 ], [ %219, %exiting__10 ] + %215 = icmp sle i64 %214, 7 + br i1 %215, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %216 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 %214) + %217 = bitcast i8* %216 to %Array** + %218 = load %Array*, %Array** %217, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %218, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %219 = add i64 %214, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %compactOps, i32 -1) + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %220 = phi i64 [ 0, %exit__10 ], [ %225, %exiting__11 ] + %221 = icmp sle i64 %220, 7 + br i1 %221, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %222 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 %220) + %223 = bitcast i8* %222 to %Array** + %224 = load %Array*, %Array** %223, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %224, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %225 = add i64 %220, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_reference_count(%Array* %compactOps, i32 -1) + br label %continue__2 + +header__12: ; preds = %exiting__12, %then2__1 + %226 = phi i64 [ 0, %then2__1 ], [ %231, %exiting__12 ] + %227 = icmp sle i64 %226, 1 + br i1 %227, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %228 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps__1, i64 %226) + %229 = bitcast i8* %228 to %Array** + %230 = load %Array*, %Array** %229, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %230, i32 1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %231 = add i64 %226, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %compactOps__1, i32 1) + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %iOp__1 = phi i64 [ 0, %exit__12 ], [ %266, %exiting__13 ] + %232 = icmp sle i64 %iOp__1, 1 + br i1 %232, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %233 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps__1, i64 %iOp__1) + %234 = bitcast i8* %233 to %Array** + %235 = load %Array*, %Array** %234, 
align 8 + store %Array* %235, %Array** %compactOp__1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %235, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %235, i32 1) + %236 = call %Array* @Microsoft__Quantum__Arrays___b053fb32724e40cbbc4050de14397ee6_ConstantArray__body(i64 %nQubits, i2 0) + store %Array* %236, %Array** %op__2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %236, i32 1) + %nIndices = call i64 @__quantum__rt__array_get_size_1d(%Array* %indices) + call void @__quantum__rt__array_update_alias_count(%Array* %236, i32 -1) + %237 = call %Array* @__quantum__rt__array_copy(%Array* %236, i1 false) + %238 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %235, i64 0) + %239 = bitcast i8* %238 to i2* + %240 = load i2, i2* %239, align 1 + %241 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %242 = bitcast i8* %241 to i64* + %243 = load i64, i64* %242, align 4 + %244 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %237, i64 %243) + %245 = bitcast i8* %244 to i2* + store i2 %240, i2* %245, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %237, i32 1) + store %Array* %237, %Array** %op__2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %237, i32 -1) + %246 = call %Array* @__quantum__rt__array_copy(%Array* %237, i1 false) + %247 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %235, i64 1) + %248 = bitcast i8* %247 to i2* + %249 = load i2, i2* %248, align 1 + %250 = sub i64 %nIndices, 1 + %251 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 %250) + %252 = bitcast i8* %251 to i64* + %253 = load i64, i64* %252, align 4 + %254 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %246, i64 %253) + %255 = bitcast i8* %254 to i2* + %256 = load i2, i2* %255, align 1 + store i2 %249, i2* %255, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %246, i32 1) + store %Array* %246, %Array** %op__2, align 8 + %257 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %258 = bitcast i8* %257 to i64* + %259 = load i64, i64* %258, align 4 + %260 = add i64 %259, 1 + %261 = sub i64 %nIndices, 1 + %262 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 %261) + %263 = bitcast i8* %262 to i64* + %264 = load i64, i64* %263, align 4 + %265 = sub i64 %264, 1 + br label %header__14 + +exiting__13: ; preds = %continue__3 + %266 = add i64 %iOp__1, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + br label %header__15 + +header__14: ; preds = %exiting__14, %body__13 + %i__2 = phi i64 [ %260, %body__13 ], [ %272, %exiting__14 ] + %267 = icmp sle i64 %i__2, %265 + br i1 %267, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %268 = load %Array*, %Array** %op__2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %268, i32 -1) + %269 = call %Array* @__quantum__rt__array_copy(%Array* %268, i1 false) + %270 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %269, i64 %i__2) + %271 = bitcast i8* %270 to i2* + store i2 -2, i2* %271, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %269, i32 1) + store %Array* %269, %Array** %op__2, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %268, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %272 = add i64 %i__2, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + %273 = icmp eq 
i64 %nIndices, 4 + br i1 %273, label %then0__3, label %continue__3 + +then0__3: ; preds = %exit__14 + %274 = load %Array*, %Array** %op__2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %274, i32 -1) + %275 = call %Array* @__quantum__rt__array_copy(%Array* %274, i1 false) + %276 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %277 = bitcast i8* %276 to i64* + %278 = load i64, i64* %277, align 4 + %279 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 1) + %280 = bitcast i8* %279 to i64* + %281 = load i64, i64* %280, align 4 + %282 = icmp slt i64 %278, %281 + br i1 %282, label %condTrue__1, label %condContinue__2 + +condTrue__1: ; preds = %then0__3 + %283 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 1) + %284 = bitcast i8* %283 to i64* + %285 = load i64, i64* %284, align 4 + %286 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 3) + %287 = bitcast i8* %286 to i64* + %288 = load i64, i64* %287, align 4 + %289 = icmp slt i64 %285, %288 + br label %condContinue__2 + +condContinue__2: ; preds = %condTrue__1, %then0__3 + %290 = phi i1 [ %289, %condTrue__1 ], [ %282, %then0__3 ] + %291 = select i1 %290, i2 0, i2 -2 + %292 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 1) + %293 = bitcast i8* %292 to i64* + %294 = load i64, i64* %293, align 4 + %295 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %275, i64 %294) + %296 = bitcast i8* %295 to i2* + store i2 %291, i2* %296, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %275, i32 1) + store %Array* %275, %Array** %op__2, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %274, i32 -1) + br label %continue__3 + +continue__3: ; preds = %condContinue__2, %exit__14 + %297 = load %Array*, %Array** %ops, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %297, i32 -1) + %298 = call %Array* @__quantum__rt__array_copy(%Array* %297, i1 false) + %299 = load %Array*, %Array** %op__2, align 8 + %300 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %298, i64 %iOp__1) + %301 = bitcast i8* %300 to %Array** + call void @__quantum__rt__array_update_alias_count(%Array* %299, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %299, i32 1) + %302 = load %Array*, %Array** %301, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %302, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %302, i32 -1) + store %Array* %299, %Array** %301, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %298, i32 1) + store %Array* %298, %Array** %ops, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %235, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %299, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %236, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %237, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %297, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %235, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %299, i32 -1) + br label %exiting__13 + +header__15: ; preds = %exiting__15, %exit__13 + %303 = phi i64 [ 0, %exit__13 ], [ %308, %exiting__15 ] + %304 = icmp sle i64 %303, 1 + br i1 %304, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %305 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps__1, i64 %303) + %306 = bitcast i8* %305 to %Array** + %307 = load %Array*, %Array** %306, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %307, i32 -1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %308 = add i64 %303, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_alias_count(%Array* %compactOps__1, i32 -1) + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %309 = phi i64 [ 0, %exit__15 ], [ %314, %exiting__16 ] + %310 = icmp sle i64 %309, 1 + br i1 %310, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %311 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps__1, i64 %309) + %312 = bitcast i8* %311 to %Array** + %313 = load %Array*, %Array** %312, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %313, i32 -1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %314 = add i64 %309, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_reference_count(%Array* %compactOps__1, i32 -1) + br label %continue__2 + +header__17: ; preds = %exiting__17, %continue__2 + %315 = phi i64 [ 0, %continue__2 ], [ %320, %exiting__17 ] + %316 = icmp sle i64 %315, %130 + br i1 %316, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %317 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %128, i64 %315) + %318 = bitcast i8* %317 to %Array** + %319 = load %Array*, %Array** %318, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %319, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %320 = add i64 %315, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %128, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %2, i32 -1) + ret %Array* %128 +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner__VQE__ExpandedCoefficients__body(%Array* %coeff, i64 %termType) { +entry: + %coeffs = alloca %Array*, align 8 + %nCoeffs = alloca i64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + store i64 0, i64* %nCoeffs, align 4 + %0 = icmp eq i64 %termType, 2 + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + store i64 2, i64* %nCoeffs, align 4 + br label %continue__1 + +test1__1: ; preds = %entry + %1 = icmp eq i64 %termType, 3 + br i1 %1, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + store i64 8, i64* %nCoeffs, align 4 + br label %continue__1 + +else__1: ; preds = %test1__1 + store i64 1, i64* %nCoeffs, align 4 + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + %2 = load i64, i64* %nCoeffs, align 4 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %2) + %4 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %5 = phi i64 [ 0, %continue__1 ], [ %9, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 %5) + %8 = bitcast i8* %7 to double* + store double 0.000000e+00, double* %8, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %3, %Array** %coeffs, align 8 + 
call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %10 = icmp eq i64 %termType, 0 + br i1 %10, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %exit__1 + %11 = icmp eq i64 %termType, 1 + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %exit__1 + %12 = phi i1 [ %10, %exit__1 ], [ %11, %condFalse__1 ] + br i1 %12, label %then0__2, label %test1__2 + +then0__2: ; preds = %condContinue__1 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + %13 = call %Array* @__quantum__rt__array_copy(%Array* %3, i1 false) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %15 = bitcast i8* %14 to double* + %16 = load double, double* %15, align 8 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 0) + %18 = bitcast i8* %17 to double* + store double %16, double* %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + store %Array* %13, %Array** %coeffs, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + br label %continue__2 + +test1__2: ; preds = %condContinue__1 + %19 = icmp eq i64 %termType, 2 + br i1 %19, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %test1__2 + %20 = icmp eq i64 %termType, 3 + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %test1__2 + %21 = phi i1 [ %19, %test1__2 ], [ %20, %condFalse__2 ] + br i1 %21, label %then1__2, label %continue__2 + +then1__2: ; preds = %condContinue__2 + %22 = sub i64 %2, 1 + br label %header__2 + +continue__2: ; preds = %exit__2, %condContinue__2, %then0__2 + %23 = load %Array*, %Array** %coeffs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + ret %Array* %23 + +header__2: ; preds = %exiting__2, %then1__2 + %i = phi i64 [ 0, %then1__2 ], [ %33, %exiting__2 ] + %24 = icmp sle i64 %i, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = load %Array*, %Array** %coeffs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + %26 = call %Array* @__quantum__rt__array_copy(%Array* %25, i1 false) + %27 = sdiv i64 %i, 2 + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 %27) + %29 = bitcast i8* %28 to double* + %30 = load double, double* %29, align 8 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %26, i64 %i) + %32 = bitcast i8* %31 to double* + store double %30, double* %32, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + store %Array* %26, %Array** %coeffs, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %33 = add i64 %i, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + br label %continue__2 +} + +define internal void @Lifted__PartialApplication__41__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Array* }*, { i64, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 
= call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Array* }*, %Array* }* getelementptr ({ { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { i64, %Array* }*, %Array* }* + %8 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %7, i32 0, i32 1 + store { i64, %Array* }* %2, { i64, %Array* }** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__41__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Array* }*, { i64, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Array* }*, %Array* }* getelementptr ({ { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { i64, %Array* }*, %Array* }* + %8 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %7, i32 0, i32 1 + store { i64, %Array* }* %2, { i64, %Array* }** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef4___PrepareTrialState____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = load { i64, %Array* }*, { i64, %Array* }** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef4___PrepareTrialState____body({ i64, %Array* }* %3, %Array* %4) 
+ ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef4___PrepareTrialState____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = load { i64, %Array* }*, { i64, %Array* }** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef4___PrepareTrialState____adj({ i64, %Array* }* %3, %Array* %4) + ret void +} + +define internal void @MemoryManagement__26__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Array* }*, { i64, %Array* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %4, i32 0, i32 1 + %6 = load %Array*, %Array** %5, align 8 + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %9 = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %9) + %12 = bitcast i8* %11 to { { double, double }*, %Array* }** + %13 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %12, align 8 + %14 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %13, i32 0, i32 0 + %15 = load { double, double }*, { double, double }** %14, align 8 + %16 = bitcast { double, double }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 %count-change) + %17 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %13, i32 0, i32 1 + %18 = load %Array*, %Array** %17, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 %count-change) + %19 = bitcast { { double, double }*, %Array* }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %9, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 %count-change) + %21 = bitcast { i64, %Array* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__26__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Array* }* }* + %1 = getelementptr inbounds 
{ %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Array* }*, { i64, %Array* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %4, i32 0, i32 1 + %6 = load %Array*, %Array** %5, align 8 + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %9 = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %9) + %12 = bitcast i8* %11 to { { double, double }*, %Array* }** + %13 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %12, align 8 + %14 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %13, i32 0, i32 0 + %15 = load { double, double }*, { double, double }** %14, align 8 + %16 = bitcast { double, double }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 %count-change) + %17 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %13, i32 0, i32 1 + %18 = load %Array*, %Array** %17, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 %count-change) + %19 = bitcast { { double, double }*, %Array* }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %19, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %9, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 %count-change) + %21 = bitcast { i64, %Array* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal double @Microsoft__Quantum__Chemistry__JordanWigner__VQE__EstimateTermExpectation__body(%Callable* %inputStateUnitary, %Array* %ops, %Array* %coeffs, i64 %nQubits, i64 %nSamples) { +entry: + %jwTermEnergy = alloca double, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %inputStateUnitary, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inputStateUnitary, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ops) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %coeffs, i32 1) + store double 0.000000e+00, double* %jwTermEnergy, align 8 + %8 = call %Array* @Microsoft__Quantum__Arrays___9d3e86ab94fe4a3a88b26f8cc32a1792_Zipped__body(%Array* %coeffs, %Array* %ops) + %9 = call i64 @__quantum__rt__array_get_size_1d(%Array* %8) + %10 = sub i64 %9, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %11 = phi i64 [ 0, %exit__1 ], [ %31, %exiting__2 ] + %12 = icmp sle i64 %11, %10 + br i1 %12, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 %11) + %14 = bitcast i8* %13 to { double, %Array* }** + %15 = load { double, %Array* }*, { double, %Array* }** %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %15, i32 0, i32 0 + %coeff = load double, double* %16, align 8 + %17 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %15, i32 0, i32 1 + %op = load %Array*, %Array** %17, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %18 = call double @Microsoft__Quantum__Math__AbsD__body(double %coeff) + %19 = fcmp oge double %18, 1.000000e-10 + br i1 %19, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__2 + %20 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Measure__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %op, i32 1) + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Callable*, %Array* }* + %23 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %22, i32 0, i32 1 + store %Callable* %20, %Callable** %23, align 8 + store %Array* %op, %Array** %24, align 8 + %25 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__42__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__27__FunctionTable, %Tuple* %21) + %termExpectation = call double @Microsoft__Quantum__Characterization__EstimateFrequencyA__body(%Callable* %inputStateUnitary, %Callable* %25, i64 %nQubits, i64 %nSamples) + %26 = load double, double* %jwTermEnergy, align 8 + %27 = fmul double 2.000000e+00, %termExpectation + %28 = fsub double %27, 1.000000e+00 + %29 = fmul double %28, %coeff + %30 = fadd double %26, %29 + store double %30, double* %jwTermEnergy, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %25, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %25, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__2 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %continue__1 + %31 = add i64 %11, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %32 = load double, double* %jwTermEnergy, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %inputStateUnitary, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inputStateUnitary, i32 -1) + %33 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %34 = phi i64 [ 0, 
%exit__2 ], [ %39, %exiting__3 ] + %35 = icmp sle i64 %34, %33 + br i1 %35, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %34) + %37 = bitcast i8* %36 to %Array** + %38 = load %Array*, %Array** %37, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %38, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %39 = add i64 %34, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeffs, i32 -1) + %40 = sub i64 %9, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %41 = phi i64 [ 0, %exit__3 ], [ %49, %exiting__4 ] + %42 = icmp sle i64 %41, %40 + br i1 %42, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 %41) + %44 = bitcast i8* %43 to { double, %Array* }** + %45 = load { double, %Array* }*, { double, %Array* }** %44, align 8 + %46 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %45, i32 0, i32 1 + %47 = load %Array*, %Array** %46, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %47, i32 -1) + %48 = bitcast { double, %Array* }* %45 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %48, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %49 = add i64 %41, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 -1) + ret double %32 +} + +define internal void @Lifted__PartialApplication__42__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Measure__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = call %Result* 
@Microsoft__Quantum__Intrinsic__Measure__body(%Array* %3, %Array* %4) + %6 = bitcast %Tuple* %result-tuple to { %Result* }* + %7 = getelementptr inbounds { %Result* }, { %Result* }* %6, i32 0, i32 0 + store %Result* %5, %Result** %7, align 8 + ret void +} + +define internal void @MemoryManagement__27__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__27__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermsToGenIdx__body(%Array* %data, %Array* %termType, i64 %idx) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %data) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %2) + %5 = bitcast i8* %4 to { %Array*, %Array* }** + %6 = load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 1) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %idx) + %14 = bitcast i8* %13 to { %Array*, 
%Array* }** + %15 = load { %Array*, %Array* }*, { %Array*, %Array* }** %14, align 8 + %16 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermToGenIdx__body({ %Array*, %Array* }* %15, %Array* %termType) + %17 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %18 = phi i64 [ 0, %exit__1 ], [ %28, %exiting__2 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %18) + %21 = bitcast i8* %20 to { %Array*, %Array* }** + %22 = load { %Array*, %Array* }*, { %Array*, %Array* }** %21, align 8 + %23 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %22, i32 0, i32 0 + %24 = load %Array*, %Array** %23, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %24, i32 -1) + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %22, i32 0, i32 1 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 -1) + %27 = bitcast { %Array*, %Array* }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %28 = add i64 %18, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %16 +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermToGenIdx__body({ %Array*, %Array* }* %term, %Array* %termType) { +entry: + %0 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %term, i32 0, i32 0 + %idxFermions = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %term, i32 0, i32 1 + %coeff = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %2 = bitcast { %Array*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %termType, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %coeff, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, %Array* }* + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + store %Array* %termType, %Array** %5, align 8 + store %Array* %coeff, %Array** %6, align 8 + %7 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %4, %Array* %idxFermions) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %termType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %7 +} + +define internal void @Lifted__PartialApplication__43__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { i64 }* + %6 = getelementptr inbounds { i64 }, { i64 }* %5, i32 0, i32 0 + %7 = load i64, i64* %6, align 4 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, i64 }* getelementptr ({ %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array*, i64 }* + %10 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %9, i32 0, i32 2 + store %Array* %2, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + store i64 %7, i64* %12, align 4 + %13 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__HTermsToGenIdx__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array*, i64 }* + %1 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %1, align 8 + %5 = load %Array*, %Array** %2, align 8 + %6 = load i64, i64* %3, align 4 + %7 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermsToGenIdx__body(%Array* %4, %Array* %5, i64 %6) + %8 = bitcast %Tuple* %result-tuple to { { { %Array*, %Array* }*, %Array* }* }* + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %8, i32 0, i32 0 + store { { %Array*, %Array* }*, %Array* }* %7, { { %Array*, %Array* }*, %Array* }** %9, align 8 + ret void +} + +define internal void @MemoryManagement__28__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, 
%Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { %Array*, %Array* }** + %11 = load { %Array*, %Array* }*, { %Array*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 0 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 %count-change) + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 %count-change) + %16 = bitcast { %Array*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %18 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %19 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__28__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { %Array*, %Array* }** + %11 = load { %Array*, %Array* }*, { %Array*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 0 + %13 = load %Array*, %Array** %12, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %13, i32 %count-change) + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 %count-change) + %16 = bitcast { %Array*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %18 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %19 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +attributes #0 = { nofree nosync nounwind readnone speculatable willreturn } diff --git a/src/munchkin/tests/qsharp/bit-flip-code/BitFlipCode.qs b/src/munchkin/tests/qsharp/bit-flip-code/BitFlipCode.qs new file mode 100644 index 0000000..88c00eb --- /dev/null +++ b/src/munchkin/tests/qsharp/bit-flip-code/BitFlipCode.qs @@ -0,0 +1,378 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Samples.BitFlipCode { + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.ErrorCorrection; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Math; + open Microsoft.Quantum.Arrays; + + + ////////////////////////////////////////////////////////////////////////// + // Introduction ////////////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////////////// + + // In this sample, we build on the discussion in the quantum error + // correction section of the developers' guide: + + // https://docs.microsoft.com/azure/quantum/user-guide/libraries/standard/error-correction + + // In particular, we start by manually encoding into the bit-flip code. + // We then show how operations and functions provided in the Q# canon + // allow us to easily model error correction in a way that immediately + // generalizes to other codes. + + ////////////////////////////////////////////////////////////////////////// + // The Bit-Flip Code ///////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////////////// + + // The bit-flip code protects against any one bit-flip (X) error on three + // qubits by mapping |0〉 to |̅0〉 ≔ |000〉 and |1〉 to |̅1〉 ≔ |111〉. By + // linearity, any other state |ψ〉 = α|0〉 + β|1〉 is represented by the + // logical state + + // |̅ψ〉 ≔ α |̅0〉 + β |̅1〉 + // = α |000〉 + β |111〉. + + // We start by defining an operation which implements an encoder for + // this code. To do so, note that CNOT allows us to "copy" classical + // information in the bitstrings used to label computational basis + // elements: + + // CNOT |b0〉 = |bb〉, + + // where b ∈ {0, 1}. This is not the same as copying the state, since + // CNOT acts linearly: + + // CNOT (α |0〉 + β |1〉) ⊗ |0〉 = α |00〉 + β |11〉. + + // That is, consistent with the no-cloning theorem, CNOT did not + // copy our arbitrary input state. 
On the other hand, this is + // precisely the transformation that we want here: + + // CNOT₀₂ · CNOT₀₁ (α |0〉 + β |1〉) ⊗ |00〉 + // = α |000〉 + β |111〉 + // = α |̅0〉 + β |̅1〉. + + // Thus, we can write out our encoder in a very simple form: + + /// # Summary + /// Given a qubit representing a state to be protected and two auxiliary + /// qubits initially in the |0〉 state, encodes the state into the + /// three-qubit bit-flip code. + /// + /// # Input + /// ## data + /// A qubit whose state is to be protected. + /// ## auxiliaryQubits + /// Two qubits, initially in the |00〉 state, to be used in protecting + /// the state of `data`. + operation EncodeIntoBitFlipCode (data : Qubit, auxiliaryQubits : Qubit[]) : Unit + // Since decoding is the adjoint of encoding, we must + // denote that this operation supports the Adjoint + // functor. + is Adj + Ctl + { + // We use the ApplyToEach operation from the canon, + // partially applied with the data qubit, to represent + // a "CNOT-ladder." In this case, the line below + // applies CNOT₀₁ · CNOT₀₂. + ApplyToEachCA(CNOT(data, _), auxiliaryQubits); + } + + // As a quick example, we will check that after encoding, the parity of + // each pair of qubits is positive (corresponding to the Zero Result), + // such that we can learn syndrome information without revealing + // the state of an encoded qubit. + + /// # Summary + /// This operation encodes into a bit-flip code, and confirms that + /// the parity measurements Z₀Z₁ and Z₁Z₂ both return positive eigenvalues + /// (that is, the Result value Zero) without disturbing the state that + /// we are trying to protect. + /// + /// # Remarks + /// This operation will fail when the parity checks are incorrect + /// if run on a target machine which supports assertions, and thus + /// can be used as a unit test of error-correction functionality. + operation CheckBitFlipCodeStateParity () : Unit { + + // We start by preparing R_x(π / 3) |0〉 as our + // test state, along with two auxiliary qubits in the |00〉 + // state that we can use to encode. + use data = Qubit(); + use auxiliaryQubits = Qubit[2]; + let register = [data] + auxiliaryQubits; + Rx(PI() / 3.0, data); + + // Next, we encode our test state. + EncodeIntoBitFlipCode(data, auxiliaryQubits); + + // At this point, register represents a code block + // that protects the state R_x(π / 3) |0〉. + // We should thus be able to measure Z₀Z₁ and Z₁Z₂ + // without disturbing the code state. + // To check this, we proceed in two steps: + + // • Use AssertMeasurement to ensure that the measurement + // will return Zero. + // • Use Measure to actually perform the measurement. + + // If our target machine is a simulator, the first step + // will cause our quantum program to crash if the assertion + // fails. Since an assertion is not a physical operation, + // the state of the qubits that we pass to AssertMeasurement is not + // disturbed. If our target machine is an actual quantum + // processor, then the assertion will be skipped with no + // further effect. + AssertMeasurement([PauliZ, PauliZ, PauliI], register, Zero, "Z₀Z₁ was One!"); + AssertMeasurement([PauliI, PauliZ, PauliZ], register, Zero, "Z₁Z₂ was One!"); + + // The second step then actually performs the measurement, + // showing that we can make parity measurements without + // disturbing the state that we care about.
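+ // Since both assertions above passed, the two joint parity + // measurements below must return Zero on a noise-free target.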
+ let parity01 = Measure([PauliZ, PauliZ, PauliI], register);
+ let parity12 = Measure([PauliI, PauliZ, PauliZ], register);
+
+ // To check that we have not disturbed the state, we decode,
+ // rotate back, and assert once more.
+ Adjoint EncodeIntoBitFlipCode(data, auxiliaryQubits);
+ Adjoint Rx(PI() / 3.0, data);
+ AssertMeasurement([PauliZ], [data], Zero, "Didn't return to |0〉!");
+ }
+
+
+ // Now that we're assured we can measure Z₀Z₁ and Z₁Z₂ without disturbing
+ // the state of interest, let's use that to actually extract a syndrome
+ // and recover from a bit-flip error.
+
+ // Starting with the previous operation as a template, we'll remove
+ // the assertions for the parity checks and allow for an error operation
+ // to be passed as an input, then modify it to use `parity01` and
+ // `parity12` to perform the correction.
+
+ // To take an error operation as an argument, we declare an input
+ // of type (Qubit[] => Unit), representing something that can happen
+ // to an array of qubits. That is, we take the error to be applied
+ // in a black-box sense.
+
+ /// # Summary
+ /// This operation encodes into a bit-flip code, and confirms that
+ /// it can correct a given error applied to the logical state
+ /// that results from encoding R_x(π / 3) |0〉.
+ ///
+ /// # Input
+ /// ## error
+ /// An operation representing an error to be applied to the code
+ /// block.
+ ///
+ /// # Remarks
+ /// This operation will fail when the error correction step fails
+ /// if run on a target machine which supports assertions, and thus
+ /// can be used as a unit test of error-correction functionality.
+ operation CheckBitFlipCodeCorrectsError(error : (Qubit[] => Unit)) : Unit {
+ use data = Qubit();
+ use auxiliaryQubits = Qubit[2];
+ let register = [data] + auxiliaryQubits;
+
+ // We start by proceeding the same way as above
+ // in order to obtain the code block state |̅ψ〉.
+ Rx(PI() / 3.0, data);
+ EncodeIntoBitFlipCode(data, auxiliaryQubits);
+
+ // Next, we apply the error that we've been given to the
+ // entire register.
+ error(register);
+
+ // We measure the two parities Z₀Z₁ and Z₁Z₂ as before
+ // to obtain our syndrome.
+ let parity01 = Measure([PauliZ, PauliZ, PauliI], register);
+ let parity12 = Measure([PauliI, PauliZ, PauliZ], register);
+
+ // To use the syndrome obtained above, we recall the following table:
+
+ // Error | Z₀Z₁ | Z₁Z₂
+ // ===================
+ // 1 | Zero | Zero
+ // X₀ | One | Zero
+ // X₁ | One | One
+ // X₂ | Zero | One
+
+ // Since the recovery is a classical inference procedure, we
+ // can represent it here by using if/elif statements:
+ if (parity01 == One and parity12 == Zero) {
+ X(register[0]);
+ }
+ elif (parity01 == One and parity12 == One) {
+ X(register[1]);
+ }
+ elif (parity01 == Zero and parity12 == One) {
+ X(register[2]);
+ }
+
+ // To check that we have not disturbed the state, we decode,
+ // rotate back, and assert once more.
+ Adjoint EncodeIntoBitFlipCode(data, auxiliaryQubits);
+ Adjoint Rx(PI() / 3.0, data);
+ AssertMeasurement([PauliZ], [data], Zero, "Didn't return to |0〉!");
+ }
+
+
+ // Now that we have defined an operation which fails if the bit-flip
+ // code fails to protect a state from a given error, we can call it
+ // with the specific errors that the bit-flip code can correct.
+ // To do so, it is helpful to use the ApplyPauli operation from
+ // the canon, which takes an array of Pauli values and applies the
+ // corresponding sequence of operations.
+
+ // For example,
+
+ // ApplyPauli([PauliX, PauliY, PauliZ, PauliI], register);
+
+ // is equivalent to
+
+ // X(register[0]);
+ // Y(register[1]);
+ // Z(register[2]);
+
+ // If we partially apply ApplyPauli, we get an operation that
+ // represents applying a specific multi-qubit Pauli operator.
+ // For instance,
+
+ // ApplyPauli([PauliX, PauliI, PauliI], _)
+
+ // is an operation of type (Qubit[] => Unit) that represents
+ // the X₀ bit-flip error.
+
+ /// # Summary
+ /// For each single-qubit bit-flip error on three qubits, this operation
+ /// encodes R_x(π / 3) |0〉 into the bit-flip code and asserts that the
+ /// code protects against that error.
+ ///
+ /// # Remarks
+ /// This operation will fail when error correction fails
+ /// if run on a target machine which supports assertions, and thus
+ /// can be used as a unit test of error-correction functionality.
+ operation CheckBitFlipCodeCorrectsBitFlipErrors() : Unit {
+ // First, we declare our errors using the notation
+ // described above.
+ let X0 = ApplyPauli([PauliX, PauliI, PauliI], _);
+ let X1 = ApplyPauli([PauliI, PauliX, PauliI], _);
+ let X2 = ApplyPauli([PauliI, PauliI, PauliX], _);
+
+ // For each of these errors, we can then check
+ // that the bit-flip code corrects them appropriately.
+ CheckBitFlipCodeCorrectsError(X0);
+ CheckBitFlipCodeCorrectsError(X1);
+ CheckBitFlipCodeCorrectsError(X2);
+ }
+
+
+ // Finally, we show how the logic described in this sample can be
+ // generalized by using functionality from the canon. This will allow
+ // us to consider much more involved error-correcting codes using the
+ // same interface as the bit-flip code discussed here.
+ // To underscore this point, we write our new operation to take a QECC
+ // value as its input, where QECC is a type provided by the canon to
+ // collect all of the relevant information about an error-correcting code.
+
+ // The canon separates the role of the classical recovery process from
+ // the rest of an error-correcting code, allowing for recovery functions
+ // which use prior information about error models to improve code
+ // performance. Thus, we take a separate input of type RecoveryFn, a
+ // canon type used to denote functions which fulfill this role.
+
+ /// # Summary
+ /// This operation encodes into an arbitrary code, and confirms that
+ /// it can correct a given error applied to the logical state
+ /// that results from encoding R_x(π / 3) |0〉.
+ ///
+ /// # Input
+ /// ## error
+ /// An operation representing an error to be applied to the code
+ /// block.
+ ///
+ /// # Remarks
+ /// This operation will fail when the error correction step fails
+ /// if run on a target machine which supports assertions, and thus
+ /// can be used as a unit test of error-correction functionality.
+ operation CheckCodeCorrectsError(code : QECC, nScratch : Int, fn : RecoveryFn, error : (Qubit[] => Unit)) : Unit {
+
+ // We once again begin by allocating some qubits to use as data
+ // and auxiliary qubits, and by preparing a test state on the
+ // data qubit.
+ use data = Qubit();
+ use auxiliaryQubits = Qubit[nScratch];
+ // We start by proceeding the same way as above
+ // in order to obtain the code block state |̅ψ〉.
+ let register = [data] + auxiliaryQubits;
+
+ Rx(PI() / 3.0, data);
+
+ // We differ this time, however, in how we perform the
+ // encoding. The code input provided to this operation
+ // specifies an encoder, a decoder, and a syndrome
+ // measurement. Deconstructing that tuple will give us access
+ // to all three operations.
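+ // (Editorial note.) The ! operator on the next line unwraps a
+ // user-defined type to its underlying value: since QECC wraps a
+ // tuple of an encoder, a decoder, and a syndrome measurement,
+ // code! can be deconstructed directly into those three callables.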
+ let (encode, decode, syndMeas) = code!;
+
+ // We can now encode as usual, with the slight exception
+ // that the encoder returns a value of a new user-defined type
+ // that marks the register as encoding a state.
+ // This is simply another "view" on the same qubits, but
+ // allows us to write operations which only act on code
+ // blocks.
+ // Note that we also pass data as an array of qubits, to
+ // allow for codes which protect multiple qubits in one block.
+ let codeBlock = encode!([data], auxiliaryQubits);
+
+ // Next, we cause an error as usual.
+ error(codeBlock!);
+
+ // We can then ask the canon to perform the recovery, using
+ // our classical recovery procedure along with the code of
+ // interest.
+ Recover(code, fn, codeBlock);
+
+ // Having recovered, we can decode to obtain new qubit arrays
+ // pointing to the decoded data and auxiliary qubits.
+ let (decodedData, decodedAuxiliary) = decode!(codeBlock);
+
+ // Finally, we test that our test state was protected.
+ Adjoint Rx(PI() / 3.0, data);
+ AssertMeasurement([PauliZ], [data], Zero, "Didn't return to |0〉!");
+ }
+
+
+ // We will now write one last test that calls the new operation with
+ // the BitFlipCode and BitFlipRecoveryFn provided by the canon.
+ // Try replacing these with calls to other codes provided by the
+ // canon!
+
+ /// # Summary
+ /// For each single-qubit bit-flip error on three qubits, this operation
+ /// encodes R_x(π / 3) |0〉 into the bit-flip code and asserts that the
+ /// code protects against that error.
+ ///
+ /// # Remarks
+ /// This operation will fail when error correction fails
+ /// if run on a target machine which supports assertions, and thus
+ /// can be used as a unit test of error-correction functionality.
+ operation CheckCanonBitFlipCodeCorrectsBitFlipErrors() : Unit {
+ let code = BitFlipCode();
+ let recoveryFn = BitFlipRecoveryFn();
+ let X0 = ApplyPauli([PauliX, PauliI, PauliI], _);
+ let X1 = ApplyPauli([PauliI, PauliX, PauliI], _);
+ let X2 = ApplyPauli([PauliI, PauliI, PauliX], _);
+
+ // For each of these errors, we can then check
+ // that the bit-flip code corrects them appropriately.
+ for error in [X0, X1, X2] {
+ CheckCodeCorrectsError(code, 2, recoveryFn, error);
+ }
+ }
+
+} diff --git a/src/munchkin/tests/qsharp/bit-flip-code/Program.qs b/src/munchkin/tests/qsharp/bit-flip-code/Program.qs new file mode 100644 index 0000000..5f983b4 --- /dev/null +++ b/src/munchkin/tests/qsharp/bit-flip-code/Program.qs @@ -0,0 +1,52 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
+
+namespace BitFlipCode {
+
+ open Microsoft.Quantum.Samples.BitFlipCode;
+ open Microsoft.Quantum.Canon;
+ open Microsoft.Quantum.Intrinsic;
+
+ @EntryPoint()
+ operation Program () : Unit {
+
+ // We call the CheckBitFlipCodeStateParity
+ // operation defined in BitFlipCode. This operation encodes
+ // into a bit-flip code, such that
+ //
+ // α |0〉 + β |1〉
+ //
+ // is encoded into
+ //
+ // α |000〉 + β |111〉,
+ //
+ // then ensures that the parity measurements Z₀Z₁ and
+ // Z₁Z₂ both return the result Zero, indicating the eigenvalue
+ // (-1)⁰ = +1, i.e., that both parities are positive.
+
+ // This check is implemented as a sequence of assertions.
+ // Since we are using a target machine which supports assertions,
+ // this implies that if flow control continues past the operation
+ // invocation, then all of the relevant checks have passed.
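+ // (Editorial note.) As BitFlipCode.qs points out, on an actual
+ // quantum processor the AssertMeasurement calls are skipped, so
+ // reaching the Message calls below certifies the checks only when
+ // the program is run on a simulator.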
+ + CheckBitFlipCodeStateParity(); + Message("Parity check passed successfully!"); + + // We call the operation + // CheckBitFlipCodeCorrectsBitFlipErrors to check that the bit- + // flip code actually protects against bit-flip errors. + // As before, this operation fails if an error is not corrected + // properly. In the UnitTesting sample, we will see how to + // represent this pattern in terms of unit tests. + + CheckBitFlipCodeCorrectsBitFlipErrors(); + Message("Corrected all three bit-flip errors successfully!"); + + // In this region, we repeat the check from above, this time using + // operations and data types from the canon to allow us to + // represent other codes. + + CheckCanonBitFlipCodeCorrectsBitFlipErrors(); + Message("Corrected all three bit-flip errors successfully!"); + } +} diff --git a/src/munchkin/tests/qsharp/bit-flip-code/bit-flip-code.csproj b/src/munchkin/tests/qsharp/bit-flip-code/bit-flip-code.csproj new file mode 100644 index 0000000..f930576 --- /dev/null +++ b/src/munchkin/tests/qsharp/bit-flip-code/bit-flip-code.csproj @@ -0,0 +1,22 @@ + + + + Library + net6.0 + true + Detailed + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + Always + + + + diff --git a/src/munchkin/tests/qsharp/bit-flip-code/libLLVM.dll b/src/munchkin/tests/qsharp/bit-flip-code/libLLVM.dll new file mode 100644 index 0000000..e10836a Binary files /dev/null and b/src/munchkin/tests/qsharp/bit-flip-code/libLLVM.dll differ diff --git a/src/munchkin/tests/qsharp/bit-flip-code/qir/bit-flip-code.ll b/src/munchkin/tests/qsharp/bit-flip-code/qir/bit-flip-code.ll new file mode 100644 index 0000000..ce11bfb --- /dev/null +++ b/src/munchkin/tests/qsharp/bit-flip-code/qir/bit-flip-code.ll @@ -0,0 +1,5745 @@ + +%Tuple = type opaque +%Callable = type opaque +%Array = type opaque +%Qubit = type opaque +%Result = type opaque +%String = type opaque +%Range = type { i64, i64, i64 } + +@PartialApplication__1__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctladj__wrapper] +@Microsoft__Quantum__Canon__ApplyPauli__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyPauli__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyPauli__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyPauli__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyPauli__ctladj__wrapper] +@MemoryManagement__1__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__1__RefCount, void (%Tuple*, i32)* @MemoryManagement__1__AliasCount] +@PartialApplication__2__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctladj__wrapper] +@PartialApplication__3__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__3__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctladj__wrapper]
+@0 = internal constant [24 x i8] c"Didn't return to |0\E2\8C\AA!\00"
+@1 = internal constant [18 x i8] c"Z\E2\82\80Z\E2\82\81 was One!\00"
+@2 = internal constant [18 x i8] c"Z\E2\82\81Z\E2\82\82 was One!\00"
+@PartialApplication__4__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctladj__wrapper]
+@PartialApplication__5__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctladj__wrapper]
+@PartialApplication__6__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctladj__wrapper]
+@PartialApplication__7__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__ctladj__wrapper]
+@Microsoft__Quantum__Intrinsic__CNOT__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__ctladj__wrapper]
+@MemoryManagement__2__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__2__RefCount, void (%Tuple*, i32)* @MemoryManagement__2__AliasCount]
+@PartialApplication__8__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__ctladj__wrapper]
+@PartialApplication__9__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__adj__wrapper, void (%Tuple*, %Tuple*,
%Tuple*)* @Lifted__PartialApplication__9__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctladj__wrapper] +@PartialApplication__10__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctladj__wrapper] +@3 = internal constant [34 x i8] c"Parity check passed successfully!\00" +@4 = internal constant [50 x i8] c"Corrected all three bit-flip errors successfully!\00" +@5 = internal constant [46 x i8] c"`Length(bits)` must be less than 64, but was \00" +@6 = internal constant [2 x i8] c".\00" +@Microsoft__Quantum__Convert__ResultAsBool__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Convert__ResultAsBool__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Canon__ApplyP__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyP__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyP__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyP__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyP__ctladj__wrapper] +@PartialApplication__11__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__3__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__3__RefCount, void (%Tuple*, i32)* @MemoryManagement__3__AliasCount] +@Microsoft__Quantum__ErrorCorrection__EncodeIntoBitFlipCode__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__ErrorCorrection__EncodeIntoBitFlipCode__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__ErrorCorrection__DecodeFromBitFlipCode__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__ErrorCorrection__DecodeFromBitFlipCode__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__12__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__ErrorCorrection__MeasureStabilizerGenerators__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__ErrorCorrection__MeasureStabilizerGenerators__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* 
null] +@Microsoft__Quantum__Measurement__MeasureWithScratch__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Measurement__MeasureWithScratch__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__4__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__4__RefCount, void (%Tuple*, i32)* @MemoryManagement__4__AliasCount] +@PartialApplication__13__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__13__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__ErrorCorrection__TableLookupRecoveryImpl__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__ErrorCorrection__TableLookupRecoveryImpl__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__5__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__5__RefCount, void (%Tuple*, i32)* @MemoryManagement__5__AliasCount] + +define void @Microsoft__Quantum__Samples__BitFlipCode__CheckBitFlipCodeCorrectsBitFlipErrors__body() { +entry: + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 3) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 0) + %3 = bitcast i8* %2 to i2* + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 1) + %5 = bitcast i8* %4 to i2* + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 2) + %7 = bitcast i8* %6 to i2* + store i2 1, i2* %3, align 1 + store i2 0, i2* %5, align 1 + store i2 0, i2* %7, align 1 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Array* }* + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 1 + store %Callable* %0, %Callable** %10, align 8 + store %Array* %1, %Array** %11, align 8 + %X0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %8) + call void @__quantum__rt__capture_update_alias_count(%Callable* %X0, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %X0, i32 1) + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %13 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 3) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 0) + %15 = bitcast i8* %14 to i2* + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 1) + %17 = 
bitcast i8* %16 to i2* + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 2) + %19 = bitcast i8* %18 to i2* + store i2 0, i2* %15, align 1 + store i2 1, i2* %17, align 1 + store i2 0, i2* %19, align 1 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Callable*, %Array* }* + %22 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %21, i32 0, i32 1 + store %Callable* %12, %Callable** %22, align 8 + store %Array* %13, %Array** %23, align 8 + %X1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %20) + call void @__quantum__rt__capture_update_alias_count(%Callable* %X1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %X1, i32 1) + %24 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %25 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 3) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 0) + %27 = bitcast i8* %26 to i2* + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 1) + %29 = bitcast i8* %28 to i2* + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 2) + %31 = bitcast i8* %30 to i2* + store i2 0, i2* %27, align 1 + store i2 0, i2* %29, align 1 + store i2 1, i2* %31, align 1 + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { %Callable*, %Array* }* + %34 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %33, i32 0, i32 1 + store %Callable* %24, %Callable** %34, align 8 + store %Array* %25, %Array** %35, align 8 + %X2 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__3__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %32) + call void @__quantum__rt__capture_update_alias_count(%Callable* %X2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %X2, i32 1) + call void @Microsoft__Quantum__Samples__BitFlipCode__CheckBitFlipCodeCorrectsError__body(%Callable* %X0) + call void @Microsoft__Quantum__Samples__BitFlipCode__CheckBitFlipCodeCorrectsError__body(%Callable* %X1) + call void @Microsoft__Quantum__Samples__BitFlipCode__CheckBitFlipCodeCorrectsError__body(%Callable* %X2) + call void @__quantum__rt__capture_update_alias_count(%Callable* %X0, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %X0, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %X1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %X1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %X2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %X2, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %X0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %X0, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %X1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %X1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %X2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %X2, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret 
void +} + +define internal void @Lifted__PartialApplication__1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, 
%Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauli__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Canon__ApplyPauli__body(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauli__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Canon__ApplyPauli__adj(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauli__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApplyPauli__ctl(%Array* %3, { %Array*, %Array* }* %4) + 
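+ ; (Editorial note.) Every __wrapper function in this file follows the
+ ; pattern seen here: bitcast the opaque %Tuple* argument tuple to its
+ ; concrete layout, load the fields, and forward them to the matching
+ ; specialization (body/adj/ctl/ctladj) named in the corresponding
+ ; function table.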
ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauli__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApplyPauli__ctladj(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +declare %Tuple* @__quantum__rt__tuple_create(i64) + +define internal void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) + +define internal void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast 
%Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void 
@__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast 
%Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define void @Microsoft__Quantum__Samples__BitFlipCode__CheckBitFlipCodeCorrectsError__body(%Callable* %error) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %error, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %error, i32 1) + %data = call %Qubit* @__quantum__rt__qubit_allocate() + %auxiliaryQubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliaryQubits, i32 1) + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to %Qubit** + store %Qubit* %data, %Qubit** %2, align 8 + %register = call %Array* @__quantum__rt__array_concatenate(%Array* %0, %Array* %auxiliaryQubits) + call void @__quantum__rt__array_update_reference_count(%Array* %register, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %3 = call double @Microsoft__Quantum__Math__PI__body() + %4 = fdiv double %3, 3.000000e+00 + call void @Microsoft__Quantum__Intrinsic__Rx__body(double %4, %Qubit* %data) + call void @Microsoft__Quantum__Samples__BitFlipCode__EncodeIntoBitFlipCode__body(%Qubit* %data, %Array* %auxiliaryQubits) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Array* }* + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + store %Array* %register, %Array** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %error, %Tuple* %5, %Tuple* null) + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 3) 
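+ ; (Editorial note.) The stores below fill %bases using QIR's i2
+ ; encoding of the Pauli type (PauliI = 0, PauliX = 1, PauliZ = 2,
+ ; PauliY = 3; 2 and 3 print as -2 and -1 when treated as signed i2),
+ ; yielding [PauliZ, PauliZ, PauliI] for the Z₀Z₁ parity measurement.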
+ %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %9 = bitcast i8* %8 to i2* + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 1) + %11 = bitcast i8* %10 to i2* + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 2) + %13 = bitcast i8* %12 to i2* + store i2 -2, i2* %9, align 1 + store i2 -2, i2* %11, align 1 + store i2 0, i2* %13, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %parity01 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %register) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + %bases__1 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 3) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases__1, i64 0) + %15 = bitcast i8* %14 to i2* + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases__1, i64 1) + %17 = bitcast i8* %16 to i2* + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases__1, i64 2) + %19 = bitcast i8* %18 to i2* + store i2 0, i2* %15, align 1 + store i2 -2, i2* %17, align 1 + store i2 -2, i2* %19, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %parity12 = call %Result* @__quantum__qis__measure__body(%Array* %bases__1, %Array* %register) + call void @__quantum__rt__array_update_alias_count(%Array* %bases__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases__1, i32 -1) + %20 = call %Result* @__quantum__rt__result_get_one() + %21 = call i1 @__quantum__rt__result_equal(%Result* %parity01, %Result* %20) + br i1 %21, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %entry + %22 = call %Result* @__quantum__rt__result_get_zero() + %23 = call i1 @__quantum__rt__result_equal(%Result* %parity12, %Result* %22) + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %entry + %24 = phi i1 [ %23, %condTrue__1 ], [ %21, %entry ] + br i1 %24, label %then0__1, label %test1__1 + +then0__1: ; preds = %condContinue__1 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 0) + %26 = bitcast i8* %25 to %Qubit** + %qubit = load %Qubit*, %Qubit** %26, align 8 + call void @__quantum__qis__x__body(%Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %condContinue__1 + %27 = call %Result* @__quantum__rt__result_get_one() + %28 = call i1 @__quantum__rt__result_equal(%Result* %parity01, %Result* %27) + br i1 %28, label %condTrue__2, label %condContinue__2 + +condTrue__2: ; preds = %test1__1 + %29 = call %Result* @__quantum__rt__result_get_one() + %30 = call i1 @__quantum__rt__result_equal(%Result* %parity12, %Result* %29) + br label %condContinue__2 + +condContinue__2: ; preds = %condTrue__2, %test1__1 + %31 = phi i1 [ %30, %condTrue__2 ], [ %28, %test1__1 ] + br i1 %31, label %then1__1, label %test2__1 + +then1__1: ; preds = %condContinue__2 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 1) + %33 = bitcast i8* %32 to %Qubit** + %qubit__1 = load %Qubit*, %Qubit** %33, align 8 + call void 
@__quantum__qis__x__body(%Qubit* %qubit__1) + br label %continue__1 + +test2__1: ; preds = %condContinue__2 + %34 = call %Result* @__quantum__rt__result_get_zero() + %35 = call i1 @__quantum__rt__result_equal(%Result* %parity01, %Result* %34) + br i1 %35, label %condTrue__3, label %condContinue__3 + +condTrue__3: ; preds = %test2__1 + %36 = call %Result* @__quantum__rt__result_get_one() + %37 = call i1 @__quantum__rt__result_equal(%Result* %parity12, %Result* %36) + br label %condContinue__3 + +condContinue__3: ; preds = %condTrue__3, %test2__1 + %38 = phi i1 [ %37, %condTrue__3 ], [ %35, %test2__1 ] + br i1 %38, label %then2__1, label %continue__1 + +then2__1: ; preds = %condContinue__3 + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 2) + %40 = bitcast i8* %39 to %Qubit** + %qubit__2 = load %Qubit*, %Qubit** %40, align 8 + call void @__quantum__qis__x__body(%Qubit* %qubit__2) + br label %continue__1 + +continue__1: ; preds = %then2__1, %condContinue__3, %then1__1, %then0__1 + call void @Microsoft__Quantum__Samples__BitFlipCode__EncodeIntoBitFlipCode__adj(%Qubit* %data, %Array* %auxiliaryQubits) + %41 = call double @Microsoft__Quantum__Math__PI__body() + %42 = fdiv double %41, 3.000000e+00 + call void @Microsoft__Quantum__Intrinsic__Rx__adj(double %42, %Qubit* %data) + %43 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 0) + %45 = bitcast i8* %44 to i2* + store i2 -2, i2* %45, align 1 + %46 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %46, i64 0) + %48 = bitcast i8* %47 to %Qubit** + store %Qubit* %data, %Qubit** %48, align 8 + %49 = call %Result* @__quantum__rt__result_get_zero() + %50 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @0, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %43, %Array* %46, %Result* %49, %String* %50) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliaryQubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %register, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %parity01, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %parity12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %43, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %46, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %50, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %auxiliaryQubits) + call void @__quantum__rt__qubit_release(%Qubit* %data) + call void @__quantum__rt__capture_update_alias_count(%Callable* %error, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %error, i32 -1) + ret void +} + +declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) + +define internal void @Microsoft__Quantum__Canon__ApplyPauli__body(%Array* %pauli, %Array* %target) { +entry: + call void 
@__quantum__rt__array_update_alias_count(%Array* %pauli, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyP__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___4c71f94c9bd54353b668964b77a208ce_Zipped__body(%Array* %pauli, %Array* %target) + call void @Microsoft__Quantum__Canon___7b778b591d0f4b348622943561272052_ApplyToEachCA__body(%Callable* %0, %Array* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { i2, %Qubit* }** + %8 = load { i2, %Qubit* }*, { i2, %Qubit* }** %7, align 8 + %9 = bitcast { i2, %Qubit* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauli__adj(%Array* %pauli, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyP__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___4c71f94c9bd54353b668964b77a208ce_Zipped__body(%Array* %pauli, %Array* %target) + call void @Microsoft__Quantum__Canon___7b778b591d0f4b348622943561272052_ApplyToEachCA__adj(%Callable* %0, %Array* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { i2, %Qubit* }** + %8 = load { i2, %Qubit* }*, { i2, %Qubit* }** %7, align 8 + %9 = bitcast { i2, %Qubit* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void 
@__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauli__ctl(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %pauli = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 1) + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyP__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Array* @Microsoft__Quantum__Arrays___4c71f94c9bd54353b668964b77a208ce_Zipped__body(%Array* %pauli, %Array* %target) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + store %Callable* %3, %Callable** %7, align 8 + store %Array* %4, %Array** %8, align 8 + call void @Microsoft__Quantum__Canon___7b778b591d0f4b348622943561272052_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %6) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + %9 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %10 = sub i64 %9, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %11 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %12 = icmp sle i64 %11, %10 + br i1 %12, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %11) + %14 = bitcast i8* %13 to { i2, %Qubit* }** + %15 = load { i2, %Qubit* }*, { i2, %Qubit* }** %14, align 8 + %16 = bitcast { i2, %Qubit* }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %11, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauli__ctladj(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %pauli = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 1) + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* 
%0, i32 0, i32 1 + %target = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyP__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Array* @Microsoft__Quantum__Arrays___4c71f94c9bd54353b668964b77a208ce_Zipped__body(%Array* %pauli, %Array* %target) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + store %Callable* %3, %Callable** %7, align 8 + store %Array* %4, %Array** %8, align 8 + call void @Microsoft__Quantum__Canon___7b778b591d0f4b348622943561272052_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Array* }* %6) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + %9 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %10 = sub i64 %9, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %11 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %12 = icmp sle i64 %11, %10 + br i1 %12, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %11) + %14 = bitcast i8* %13 to { i2, %Qubit* }** + %15 = load { i2, %Qubit* }*, { i2, %Qubit* }** %14, align 8 + %16 = bitcast { i2, %Qubit* }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %11, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) + +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) + +declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) + +declare void @__quantum__rt__callable_make_adjoint(%Callable*) + +declare void @__quantum__rt__callable_make_controlled(%Callable*) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) + +declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__rt__qubit_release(%Qubit*) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) + +define internal void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__r__body(i2 1, double %theta, %Qubit* %qubit) + ret void +} + +define internal double 
@Microsoft__Quantum__Math__PI__body() { +entry: + ret double 0x400921FB54442D18 +} + +define void @Microsoft__Quantum__Samples__BitFlipCode__EncodeIntoBitFlipCode__body(%Qubit* %data, %Array* %auxiliaryQubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliaryQubits, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__CNOT__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Qubit* }, { %Callable*, %Qubit* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Qubit* }* + %3 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %2, i32 0, i32 1 + store %Callable* %0, %Callable** %3, align 8 + store %Qubit* %data, %Qubit** %4, align 8 + %5 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__7__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %1) + call void @Microsoft__Quantum__Canon___d5999d2381314c67a04b2c24f6c12486_ApplyToEachCA__body(%Callable* %5, %Array* %auxiliaryQubits) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliaryQubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %5, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %5, i32 -1) + ret void +} + +declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) + +declare %Result* @__quantum__rt__result_get_one() + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +declare %Result* @__quantum__rt__result_get_zero() + +declare void @__quantum__qis__x__body(%Qubit*) + +define void @Microsoft__Quantum__Samples__BitFlipCode__EncodeIntoBitFlipCode__adj(%Qubit* %data, %Array* %auxiliaryQubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliaryQubits, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__CNOT__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Qubit* }, { %Callable*, %Qubit* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Qubit* }* + %3 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %2, i32 0, i32 1 + store %Callable* %0, %Callable** %3, align 8 + store %Qubit* %data, %Qubit** %4, align 8 + %5 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__8__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %1) + call void @Microsoft__Quantum__Canon___d5999d2381314c67a04b2c24f6c12486_ApplyToEachCA__adj(%Callable* %5, %Array* %auxiliaryQubits) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliaryQubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %5, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__adj(double %theta, %Qubit* 
%qubit) { +entry: + %theta__1 = fneg double %theta + call void @__quantum__qis__r__body(i2 1, double %theta__1, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %bases, %Array* %qubits, %Result* %result, %String* %msg) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double 1.000000e+00, %String* %msg, double 1.000000e-10) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +declare %String* @__quantum__rt__string_create(i8*) + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) + +define void @Microsoft__Quantum__Samples__BitFlipCode__CheckBitFlipCodeStateParity__body() { +entry: + %data = call %Qubit* @__quantum__rt__qubit_allocate() + %auxiliaryQubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliaryQubits, i32 1) + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to %Qubit** + store %Qubit* %data, %Qubit** %2, align 8 + %register = call %Array* @__quantum__rt__array_concatenate(%Array* %0, %Array* %auxiliaryQubits) + call void @__quantum__rt__array_update_reference_count(%Array* %register, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %3 = call double @Microsoft__Quantum__Math__PI__body() + %4 = fdiv double %3, 3.000000e+00 + call void @Microsoft__Quantum__Intrinsic__Rx__body(double %4, %Qubit* %data) + call void @Microsoft__Quantum__Samples__BitFlipCode__EncodeIntoBitFlipCode__body(%Qubit* %data, %Array* %auxiliaryQubits) + %5 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 3) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %5, i64 0) + %7 = bitcast i8* %6 to i2* + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %5, i64 1) + %9 = bitcast i8* %8 to i2* + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %5, i64 2) + %11 = bitcast i8* %10 to i2* + store i2 -2, i2* %7, align 1 + store i2 -2, i2* %9, align 1 + store i2 0, i2* %11, align 1 + %12 = call %Result* @__quantum__rt__result_get_zero() + %13 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @1, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %5, %Array* %register, %Result* %12, %String* %13) + %14 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 3) + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %14, i64 0) + %16 = bitcast i8* %15 to i2* + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %14, i64 1) + %18 = bitcast i8* %17 to i2* + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %14, i64 
2) + %20 = bitcast i8* %19 to i2* + store i2 0, i2* %16, align 1 + store i2 -2, i2* %18, align 1 + store i2 -2, i2* %20, align 1 + %21 = call %Result* @__quantum__rt__result_get_zero() + %22 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([14 x i8], [14 x i8]* @2, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %14, %Array* %register, %Result* %21, %String* %22) + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 3) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %24 = bitcast i8* %23 to i2* + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 1) + %26 = bitcast i8* %25 to i2* + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 2) + %28 = bitcast i8* %27 to i2* + store i2 -2, i2* %24, align 1 + store i2 -2, i2* %26, align 1 + store i2 0, i2* %28, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %parity01 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %register) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + %bases__1 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 3) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases__1, i64 0) + %30 = bitcast i8* %29 to i2* + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases__1, i64 1) + %32 = bitcast i8* %31 to i2* + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases__1, i64 2) + %34 = bitcast i8* %33 to i2* + store i2 0, i2* %30, align 1 + store i2 -2, i2* %32, align 1 + store i2 -2, i2* %34, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %parity12 = call %Result* @__quantum__qis__measure__body(%Array* %bases__1, %Array* %register) + call void @__quantum__rt__array_update_alias_count(%Array* %bases__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases__1, i32 -1) + call void @Microsoft__Quantum__Samples__BitFlipCode__EncodeIntoBitFlipCode__adj(%Qubit* %data, %Array* %auxiliaryQubits) + %35 = call double @Microsoft__Quantum__Math__PI__body() + %36 = fdiv double %35, 3.000000e+00 + call void @Microsoft__Quantum__Intrinsic__Rx__adj(double %36, %Qubit* %data) + %37 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 0) + %39 = bitcast i8* %38 to i2* + store i2 -2, i2* %39, align 1 + %40 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %40, i64 0) + %42 = bitcast i8* %41 to %Qubit** + store %Qubit* %data, %Qubit** %42, align 8 + %43 = call %Result* @__quantum__rt__result_get_zero() + %44 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @0, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %37, %Array* %40, %Result* %43, %String* %44) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliaryQubits, i32 
-1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %22, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %parity01, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %parity12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %40, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %auxiliaryQubits) + call void @__quantum__rt__qubit_release(%Qubit* %data) + ret void +} + +define void @Microsoft__Quantum__Samples__BitFlipCode__CheckCanonBitFlipCodeCorrectsBitFlipErrors__body() { +entry: + %code = call { { %Callable* }*, { %Callable* }*, { %Callable* }* }* @Microsoft__Quantum__ErrorCorrection__BitFlipCode__body() + %0 = getelementptr inbounds { { %Callable* }*, { %Callable* }*, { %Callable* }* }, { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code, i32 0, i32 0 + %1 = load { %Callable* }*, { %Callable* }** %0, align 8 + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + %3 = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 1) + %4 = bitcast { %Callable* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Callable* }*, { %Callable* }*, { %Callable* }* }, { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code, i32 0, i32 1 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { %Callable* }*, { %Callable* }*, { %Callable* }* }, { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code, i32 0, i32 2 + %11 = load { %Callable* }*, { %Callable* }** %10, align 8 + %12 = getelementptr inbounds { %Callable* }, { %Callable* }* %11, i32 0, i32 0 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 1) + %14 = bitcast { %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = bitcast { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %recoveryFn = call { %Callable* }* 
@Microsoft__Quantum__ErrorCorrection__BitFlipRecoveryFn__body() + %16 = getelementptr inbounds { %Callable* }, { %Callable* }* %recoveryFn, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %17, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %17, i32 1) + %18 = bitcast { %Callable* }* %recoveryFn to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + %19 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %20 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 3) + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %20, i64 0) + %22 = bitcast i8* %21 to i2* + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %20, i64 1) + %24 = bitcast i8* %23 to i2* + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %20, i64 2) + %26 = bitcast i8* %25 to i2* + store i2 1, i2* %22, align 1 + store i2 0, i2* %24, align 1 + store i2 0, i2* %26, align 1 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Callable*, %Array* }* + %29 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %28, i32 0, i32 1 + store %Callable* %19, %Callable** %29, align 8 + store %Array* %20, %Array** %30, align 8 + %X0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__4__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %27) + call void @__quantum__rt__capture_update_alias_count(%Callable* %X0, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %X0, i32 1) + %31 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %32 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 3) + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 0) + %34 = bitcast i8* %33 to i2* + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 1) + %36 = bitcast i8* %35 to i2* + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 2) + %38 = bitcast i8* %37 to i2* + store i2 0, i2* %34, align 1 + store i2 1, i2* %36, align 1 + store i2 0, i2* %38, align 1 + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Callable*, %Array* }* + %41 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %40, i32 0, i32 0 + %42 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %40, i32 0, i32 1 + store %Callable* %31, %Callable** %41, align 8 + store %Array* %32, %Array** %42, align 8 + %X1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__5__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %39) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %X1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %X1, i32 1) + %43 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %44 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 3) + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 0) + %46 = bitcast i8* %45 to i2* + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 1) + %48 = bitcast i8* %47 to i2* + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 2) + %50 = bitcast i8* %49 to i2* + store i2 0, i2* %46, align 1 + store i2 0, i2* %48, align 1 + store i2 1, i2* %50, align 1 + %51 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %52 = bitcast %Tuple* %51 to { %Callable*, %Array* }* + %53 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %52, i32 0, i32 0 + %54 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %52, i32 0, i32 1 + store %Callable* %43, %Callable** %53, align 8 + store %Array* %44, %Array** %54, align 8 + %X2 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__6__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %51) + call void @__quantum__rt__capture_update_alias_count(%Callable* %X2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %X2, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %X0, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %X0, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %X1, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %X1, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %X2, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %X2, i32 1) + %55 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 3) + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 0) + %57 = bitcast i8* %56 to %Callable** + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 1) + %59 = bitcast i8* %58 to %Callable** + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 2) + %61 = bitcast i8* %60 to %Callable** + store %Callable* %X0, %Callable** %57, align 8 + store %Callable* %X1, %Callable** %59, align 8 + store %Callable* %X2, %Callable** %61, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %62 = phi i64 [ 0, %entry ], [ %66, %exiting__1 ] + %63 = icmp sle i64 %62, 2 + br i1 %63, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 %62) + %65 = bitcast i8* %64 to %Callable** + %error = load %Callable*, %Callable** %65, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %error, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %error, i32 1) + call void @Microsoft__Quantum__Samples__BitFlipCode__CheckCodeCorrectsError__body({ { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code, i64 2, { %Callable* }* %recoveryFn, %Callable* %error) + call 
void @__quantum__rt__capture_update_alias_count(%Callable* %error, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %error, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %66 = add i64 %62, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %17, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %X0, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %X0, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %X1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %X1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %X2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %X2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %X0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %X0, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %X1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %X1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %X2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %X2, i32 -1) + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %67 = phi i64 [ 0, %exit__1 ], [ %72, %exiting__2 ] + %68 = icmp sle i64 %67, 2 + br i1 %68, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + 
%69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 %67) + %70 = bitcast i8* %69 to %Callable** + %71 = load %Callable*, %Callable** %70, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %71, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %71, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %72 = add i64 %67, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %55, i32 -1) + ret void +} + +define internal { { %Callable* }*, { %Callable* }*, { %Callable* }* }* @Microsoft__Quantum__ErrorCorrection__BitFlipCode__body() { +entry: + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__ErrorCorrection__EncodeIntoBitFlipCode__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %e = call { %Callable* }* @Microsoft__Quantum__ErrorCorrection__EncodeOp__body(%Callable* %0) + %1 = getelementptr inbounds { %Callable* }, { %Callable* }* %e, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + %3 = bitcast { %Callable* }* %e to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__ErrorCorrection__DecodeFromBitFlipCode__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %d = call { %Callable* }* @Microsoft__Quantum__ErrorCorrection__DecodeOp__body(%Callable* %4) + %5 = getelementptr inbounds { %Callable* }, { %Callable* }* %d, i32 0, i32 0 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + %7 = bitcast { %Callable* }* %d to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__ErrorCorrection__MeasureStabilizerGenerators__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %9 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 3) + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %9, i64 0) + %11 = bitcast i8* %10 to i2* + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %9, i64 1) + %13 = bitcast i8* %12 to i2* + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %9, i64 2) + %15 = bitcast i8* %14 to i2* + store i2 -2, i2* %11, align 1 + store i2 -2, i2* %13, align 1 + store i2 0, i2* %15, align 1 + %16 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 3) + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %16, i64 0) + %18 = bitcast i8* %17 to i2* + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %16, i64 1) + %20 = bitcast i8* %19 to i2* + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %16, i64 2) + %22 = bitcast i8* %21 to i2* + store i2 0, i2* %18, align 1 + store i2 -2, i2* %20, align 1 + store i2 -2, i2* %22, align 1 + %23 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 0) + %25 = bitcast i8* %24 to %Array** + %26 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 1) + %27 = bitcast i8* %26 to %Array** + store %Array* %9, %Array** %25, align 8 + store %Array* %16, %Array** %27, align 8 + %28 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Measurement__MeasureWithScratch__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Callable* }* getelementptr ({ %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* null, i32 1) to i64)) + %30 = bitcast %Tuple* %29 to { %Callable*, %Array*, %Callable* }* + %31 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %30, i32 0, i32 0 + %32 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %30, i32 0, i32 1 + %33 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %30, i32 0, i32 2 + store %Callable* %8, %Callable** %31, align 8 + store %Array* %23, %Array** %32, align 8 + store %Callable* %28, %Callable** %33, align 8 + %34 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__12__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__4__FunctionTable, %Tuple* %29) + %s = call { %Callable* }* @Microsoft__Quantum__ErrorCorrection__SyndromeMeasOp__body(%Callable* %34) + %35 = getelementptr inbounds { %Callable* }, { %Callable* }* %s, i32 0, i32 0 + %36 = load %Callable*, %Callable** %35, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %36, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %36, i32 1) + %37 = bitcast { %Callable* }* %s to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %37, i32 1) + %code = call { { %Callable* }*, { %Callable* }*, { %Callable* }* }* @Microsoft__Quantum__ErrorCorrection__QECC__body({ %Callable* }* %e, { %Callable* }* %d, { %Callable* }* %s) + %38 = getelementptr inbounds { { %Callable* }*, { %Callable* }*, { %Callable* }* }, { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code, i32 0, i32 0 + %39 = load { %Callable* }*, { %Callable* }** %38, align 8 + %40 = getelementptr inbounds { %Callable* }, { %Callable* }* %39, i32 0, i32 0 + %41 = load %Callable*, %Callable** %40, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %41, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %41, i32 1) + %42 = bitcast { %Callable* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 1) + %43 = getelementptr inbounds { { %Callable* }*, { %Callable* }*, { %Callable* }* }, { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code, i32 0, i32 1 + %44 = load { %Callable* }*, { %Callable* }** %43, align 8 + %45 = getelementptr inbounds { %Callable* }, { %Callable* }* %44, i32 0, i32 0 + %46 = load %Callable*, %Callable** %45, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %46, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %46, i32 1) + %47 = bitcast { %Callable* }* %44 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 1) + %48 = getelementptr inbounds { { %Callable* }*, { %Callable* }*, { %Callable* }* }, { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code, i32 0, i32 2 + %49 = load { %Callable* }*, { %Callable* }** 
%48, align 8 + %50 = getelementptr inbounds { %Callable* }, { %Callable* }* %49, i32 0, i32 0 + %51 = load %Callable*, %Callable** %50, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %51, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %51, i32 1) + %52 = bitcast { %Callable* }* %49 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %52, i32 1) + %53 = bitcast { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %53, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %36, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %36, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %37, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %41, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %41, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %46, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %46, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %51, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %51, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %52, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %53, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1) + ret { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code +} + +define internal { %Callable* }* @Microsoft__Quantum__ErrorCorrection__BitFlipRecoveryFn__body() { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 3) + %1 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to i2* + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 1) + %4 = bitcast i8* %3 to i2* + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 2) + %6 = bitcast i8* %5 to i2* + store i2 0, i2* %2, align 1 + store i2 0, i2* %4, align 1 + store i2 0, i2* %6, align 1 + %7 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 3) + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %7, i64 0) + %9 = bitcast i8* %8 to i2* + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %7, i64 1) + %11 = bitcast i8* %10 to i2* + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %7, i64 2) + %13 = bitcast i8* %12 to i2* + store i2 1, i2* %9, align 1 + store i2 0, i2* %11, align 1 + store i2 0, i2* %13, align 1 + %14 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 3) + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %14, i64 0) + %16 = bitcast i8* %15 to i2* + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %14, i64 1) + %18 = bitcast i8* %17 to i2* + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %14, i64 2) + %20 = bitcast i8* %19 to i2* + store i2 0, i2* %16, align 1 + store i2 0, i2* %18, align 1 + store i2 1, i2* %20, align 1 + %21 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 3) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 0) + %23 = bitcast i8* %22 to i2* + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 1) + %25 = bitcast i8* %24 to i2* + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 2) + %27 = bitcast i8* %26 to i2* + store i2 0, i2* %23, align 1 + store i2 1, i2* %25, align 1 + store i2 0, i2* %27, align 1 + %28 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 0) + %30 = bitcast i8* %29 to %Array** + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 1) + %32 = bitcast i8* %31 to %Array** + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 2) + %34 = bitcast i8* %33 to %Array** + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 3) + %36 = bitcast i8* %35 to %Array** + store %Array* %0, %Array** %30, align 8 + store %Array* %7, %Array** %32, align 8 + store %Array* %14, %Array** %34, align 8 + store %Array* %21, %Array** %36, align 8 + %37 = call { %Callable* }* @Microsoft__Quantum__ErrorCorrection__TableLookupRecovery__body(%Array* %28) + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %38 = phi i64 [ 0, %entry ], [ %43, %exiting__1 ] + %39 = icmp sle i64 %38, 3 + br i1 %39, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 %38) + %41 = bitcast i8* %40 to %Array** + %42 = load %Array*, %Array** %41, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %42, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %43 = add i64 %38, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + ret { %Callable* }* %37 +} + +define internal void @Lifted__PartialApplication__4__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* 
}* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { 
%Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* 
@__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 
= load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* 
}* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define void 
@Microsoft__Quantum__Samples__BitFlipCode__CheckCodeCorrectsError__body({ { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code, i64 %nScratch, { %Callable* }* %fn, %Callable* %error) { +entry: + %0 = getelementptr inbounds { { %Callable* }*, { %Callable* }*, { %Callable* }* }, { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code, i32 0, i32 0 + %encode = load { %Callable* }*, { %Callable* }** %0, align 8 + %1 = getelementptr inbounds { %Callable* }, { %Callable* }* %encode, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + %3 = bitcast { %Callable* }* %encode to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { %Callable* }*, { %Callable* }*, { %Callable* }* }, { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code, i32 0, i32 1 + %decode = load { %Callable* }*, { %Callable* }** %4, align 8 + %5 = getelementptr inbounds { %Callable* }, { %Callable* }* %decode, i32 0, i32 0 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + %7 = bitcast { %Callable* }* %decode to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { { %Callable* }*, { %Callable* }*, { %Callable* }* }, { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code, i32 0, i32 2 + %syndMeas = load { %Callable* }*, { %Callable* }** %8, align 8 + %9 = getelementptr inbounds { %Callable* }, { %Callable* }* %syndMeas, i32 0, i32 0 + %10 = load %Callable*, %Callable** %9, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %10, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %10, i32 1) + %11 = bitcast { %Callable* }* %syndMeas to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = bitcast { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + %13 = getelementptr inbounds { %Callable* }, { %Callable* }* %fn, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %14, i32 1) + %15 = bitcast { %Callable* }* %fn to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %error, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %error, i32 1) + %data = call %Qubit* @__quantum__rt__qubit_allocate() + %auxiliaryQubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %nScratch) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliaryQubits, i32 1) + %16 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %16, i64 0) + %18 = bitcast i8* %17 to %Qubit** + store %Qubit* %data, %Qubit** %18, align 8 + %register = call %Array* @__quantum__rt__array_concatenate(%Array* %16, %Array* %auxiliaryQubits) + call void @__quantum__rt__array_update_reference_count(%Array* %register, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) 
+ %19 = call double @Microsoft__Quantum__Math__PI__body() + %20 = fdiv double %19, 3.000000e+00 + call void @Microsoft__Quantum__Intrinsic__Rx__body(double %20, %Qubit* %data) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %10, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %10, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %21 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 0) + %23 = bitcast i8* %22 to %Qubit** + store %Qubit* %data, %Qubit** %23, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliaryQubits, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { %Array*, %Array* }* + %26 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %25, i32 0, i32 1 + store %Array* %21, %Array** %26, align 8 + store %Array* %auxiliaryQubits, %Array** %27, align 8 + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }* }* getelementptr ({ { %Array* }* }, { { %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %2, %Tuple* %24, %Tuple* %28) + %29 = bitcast %Tuple* %28 to { { %Array* }* }* + %30 = getelementptr inbounds { { %Array* }* }, { { %Array* }* }* %29, i32 0, i32 0 + %codeBlock = load { %Array* }*, { %Array* }** %30, align 8 + %31 = getelementptr inbounds { %Array* }, { %Array* }* %codeBlock, i32 0, i32 0 + %32 = load %Array*, %Array** %31, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %33 = bitcast { %Array* }* %codeBlock to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %33, i32 1) + %34 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %35 = bitcast %Tuple* %34 to { %Array* }* + %36 = getelementptr inbounds { %Array* }, { %Array* }* %35, i32 0, i32 0 + store %Array* %32, %Array** %36, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %error, %Tuple* %34, %Tuple* null) + call void @Microsoft__Quantum__ErrorCorrection__Recover__body({ { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code, { %Callable* }* %fn, { %Array* }* %codeBlock) + %37 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %33, %Tuple* %37) + %38 = bitcast %Tuple* %37 to { %Array*, %Array* }* + %39 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %38, i32 0, i32 0 + %decodedData = load %Array*, %Array** %39, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %decodedData, i32 1) + %40 = 
getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %38, i32 0, i32 1 + %decodedAuxiliary = load %Array*, %Array** %40, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %decodedAuxiliary, i32 1) + %41 = call double @Microsoft__Quantum__Math__PI__body() + %42 = fdiv double %41, 3.000000e+00 + call void @Microsoft__Quantum__Intrinsic__Rx__adj(double %42, %Qubit* %data) + %43 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 0) + %45 = bitcast i8* %44 to i2* + store i2 -2, i2* %45, align 1 + %46 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %46, i64 0) + %48 = bitcast i8* %47 to %Qubit** + store %Qubit* %data, %Qubit** %48, align 8 + %49 = call %Result* @__quantum__rt__result_get_zero() + %50 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @0, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %43, %Array* %46, %Result* %49, %String* %50) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliaryQubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %33, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %decodedData, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %decodedAuxiliary, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliaryQubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %decodedData, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %decodedAuxiliary, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %43, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %46, i32 -1) + call void 
@__quantum__rt__string_update_reference_count(%String* %50, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %auxiliaryQubits) + call void @__quantum__rt__qubit_release(%Qubit* %data) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %error, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %error, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ErrorCorrection__Recover__body({ { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code, { %Callable* }* %fn, { %Array* }* %logicalRegister) { +entry: + %0 = getelementptr inbounds { { %Callable* }*, { %Callable* }*, { %Callable* }* }, { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code, i32 0, i32 0 + %encode = load { %Callable* }*, { %Callable* }** %0, align 8 + %1 = getelementptr inbounds { %Callable* }, { %Callable* }* %encode, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + %3 = bitcast { %Callable* }* %encode to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { %Callable* }*, { %Callable* }*, { %Callable* }* }, { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code, i32 0, i32 1 + %decode = load { %Callable* }*, { %Callable* }** %4, align 8 + %5 = getelementptr inbounds { %Callable* }, { %Callable* }* %decode, i32 0, i32 0 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + %7 = bitcast { %Callable* }* %decode to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { { %Callable* }*, { %Callable* }*, { %Callable* }* }, { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %code, i32 0, i32 2 + %syndMeas = load { %Callable* }*, { %Callable* }** %8, align 8 + %9 = getelementptr inbounds { %Callable* }, { %Callable* }* %syndMeas, i32 0, i32 0 + %10 = load %Callable*, %Callable** %9, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %10, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %10, i32 1) + %11 = bitcast { %Callable* }* %syndMeas to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = bitcast { { %Callable* }*, { %Callable* }*, { %Callable* }* 
}* %code to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + %13 = getelementptr inbounds { %Callable* }, { %Callable* }* %fn, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %14, i32 1) + %15 = bitcast { %Callable* }* %fn to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = getelementptr inbounds { %Array* }, { %Array* }* %logicalRegister, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array* }* %logicalRegister to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %10, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %10, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }* }* getelementptr ({ { %Array* }* }, { { %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %10, %Tuple* %18, %Tuple* %19) + %20 = bitcast %Tuple* %19 to { { %Array* }* }* + %21 = getelementptr inbounds { { %Array* }* }, { { %Array* }* }* %20, i32 0, i32 0 + %syndrome = load { %Array* }*, { %Array* }** %21, align 8 + %22 = getelementptr inbounds { %Array* }, { %Array* }* %syndrome, i32 0, i32 0 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %24 = bitcast { %Array* }* %syndrome to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 1) + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %24, %Tuple* %25) + %26 = bitcast %Tuple* %25 to { %Array* }* + %27 = getelementptr inbounds { %Array* }, { %Array* }* %26, i32 0, i32 0 + %recoveryOp = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %recoveryOp, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauli__body(%Array* %recoveryOp, %Array* %17) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %recoveryOp, i32 -1) + %28 = call i64 @__quantum__rt__array_get_size_1d(%Array* %23) + %29 = sub i64 %28, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %30 = phi i64 [ 0, %entry ], [ %35, %exiting__1 ] + %31 = icmp sle i64 %30, %29 + br i1 %31, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %30) + %33 = bitcast i8* %32 to %Result** + %34 = load %Result*, %Result** %33, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %34, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %35 = add i64 %30, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %recoveryOp, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___d5999d2381314c67a04b2c24f6c12486_ApplyToEachCA__body(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call %Range @Microsoft__Quantum__Arrays___091380f3b5d14dd89cf85a46bbbe3f40_IndexRange__body(%Array* %register) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %4 = icmp sgt i64 %2, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxQubit = phi i64 [ %1, %preheader__1 ], [ %14, %exiting__1 ] + %5 = icmp sle i64 %idxQubit, %3 + %6 = icmp sge i64 %idxQubit, %3 + %7 = select i1 %4, i1 %5, i1 %6 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Qubit* }* + %13 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %12, i32 0, i32 0 + store %Qubit* %10, %Qubit** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %singleElementOperation, %Tuple* %11, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %14 = add i64 %idxQubit, %2 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Qubit* }* + %1 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %0, i32 0, i32 1 + %2 = load %Qubit*, %Qubit** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Qubit*, %Qubit* }* + %8 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %7, i32 0, i32 1 + store %Qubit* %2, %Qubit** %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Qubit* }* + %1 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %0, i32 0, i32 1 + %2 = load %Qubit*, %Qubit** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Qubit*, %Qubit* }* + %8 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %7, i32 0, i32 1 + store %Qubit* %2, %Qubit** %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + 
%12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Qubit* }* + %6 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 1 + %7 = load %Qubit*, %Qubit** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Qubit*, %Qubit* }* + %10 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %9, i32 0, i32 1 + store %Qubit* %7, %Qubit** %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Qubit*, %Qubit* }* }* getelementptr ({ %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Qubit*, %Qubit* }* %9, { %Qubit*, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* 
%0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Qubit* }* + %6 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 1 + %7 = load %Qubit*, %Qubit** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Qubit*, %Qubit* }* + %10 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %9, i32 0, i32 1 + store %Qubit* %7, %Qubit** %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Qubit*, %Qubit* }* }* getelementptr ({ %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Qubit*, %Qubit* }* %9, { %Qubit*, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, %Qubit* }* + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Qubit*, %Qubit** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, %Qubit* }* + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Qubit*, %Qubit** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* 
%arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Qubit*, %Qubit* }*, { %Qubit*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %3, { %Qubit*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Qubit*, %Qubit* }*, { %Qubit*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %3, { %Qubit*, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Qubit* }* + %1 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Qubit* }* + %1 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { +entry: + %__controlQubits__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__controlQubits__, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, %Qubit** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* 
%0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %control, %Qubit** %5, align 8 + %__controlQubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %__controlQubits__, %Array* %3) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__1, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* + %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 + store %Qubit* %control, %Qubit** %5, align 8 + store %Qubit* %target, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___d5999d2381314c67a04b2c24f6c12486_ApplyToEachCA__adj(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call %Range @Microsoft__Quantum__Arrays___091380f3b5d14dd89cf85a46bbbe3f40_IndexRange__body(%Array* %register) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + %4 = sub i64 %3, %1 + %5 = sdiv i64 %4, %2 + %6 = mul i64 %2, %5 + %7 = add i64 %1, %6 + %8 = sub i64 0, %2 + %9 = insertvalue %Range zeroinitializer, i64 %7, 0 + %10 = 
insertvalue %Range %9, i64 %8, 1 + %11 = insertvalue %Range %10, i64 %1, 2 + %12 = extractvalue %Range %11, 0 + %13 = extractvalue %Range %11, 1 + %14 = extractvalue %Range %11, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %15 = icmp sgt i64 %13, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %12, %preheader__1 ], [ %26, %exiting__1 ] + %16 = icmp sle i64 %__qsVar0__idxQubit__, %14 + %17 = icmp sge i64 %__qsVar0__idxQubit__, %14 + %18 = select i1 %15, i1 %16, i1 %17 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %19) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %21 = bitcast i8* %20 to %Qubit** + %22 = load %Qubit*, %Qubit** %21, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Qubit* }* + %25 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %24, i32 0, i32 0 + store %Qubit* %22, %Qubit** %25, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %19, %Tuple* %23, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %26 = add i64 %__qsVar0__idxQubit__, %13 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Qubit* }* + %1 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %0, i32 0, i32 1 + %2 = load %Qubit*, %Qubit** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Qubit*, %Qubit* }* + %8 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %7, i32 0, i32 1 + store %Qubit* %2, %Qubit** %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__adj__wrapper(%Tuple* 
%capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Qubit* }* + %1 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %0, i32 0, i32 1 + %2 = load %Qubit*, %Qubit** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Qubit*, %Qubit* }* + %8 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %7, i32 0, i32 1 + store %Qubit* %2, %Qubit** %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Qubit* }* + %6 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 1 + %7 = load %Qubit*, %Qubit** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Qubit*, %Qubit* }* + %10 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %9, i32 0, i32 1 + store %Qubit* %7, %Qubit** %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Qubit*, %Qubit* }* }* getelementptr ({ %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Qubit*, %Qubit* }* %9, { %Qubit*, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 0 + %17 = load 
%Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Qubit* }* + %6 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 1 + %7 = load %Qubit*, %Qubit** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Qubit*, %Qubit* }* + %10 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %9, i32 0, i32 1 + store %Qubit* %7, %Qubit** %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Qubit*, %Qubit* }* }* getelementptr ({ %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Qubit*, %Qubit* }* %9, { %Qubit*, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define void @Microsoft__Quantum__Samples__BitFlipCode__EncodeIntoBitFlipCode__ctl(%Array* %__controlQubits__, { %Qubit*, %Array* }* %0) { +entry: + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Array* }, { %Qubit*, %Array* }* %0, i32 0, i32 0 + %data = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Array* }, { %Qubit*, %Array* }* %0, i32 0, i32 1 + %auxiliaryQubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliaryQubits, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__CNOT__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Qubit* }, { %Callable*, %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Callable*, %Qubit* }* + %6 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 1 + store %Callable* %3, %Callable** %6, align 8 + store %Qubit* %data, %Qubit** %7, align 8 + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__9__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %4) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliaryQubits, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 + store %Callable* %8, %Callable** %11, align 8 + store %Array* %auxiliaryQubits, %Array** %12, align 8 + call void @Microsoft__Quantum__Canon___d5999d2381314c67a04b2c24f6c12486_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %10) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliaryQubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliaryQubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___d5999d2381314c67a04b2c24f6c12486_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %3 = call %Range 
@Microsoft__Quantum__Arrays___091380f3b5d14dd89cf85a46bbbe3f40_IndexRange__body(%Array* %register) + %4 = extractvalue %Range %3, 0 + %5 = extractvalue %Range %3, 1 + %6 = extractvalue %Range %3, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %7 = icmp sgt i64 %5, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxQubit = phi i64 [ %4, %preheader__1 ], [ %19, %exiting__1 ] + %8 = icmp sle i64 %idxQubit, %6 + %9 = icmp sge i64 %idxQubit, %6 + %10 = select i1 %7, i1 %8, i1 %9 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, %Qubit* }* + %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %16, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %17, align 8 + store %Qubit* %14, %Qubit** %18, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %15, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %idxQubit, %5 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Qubit* }* + %1 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %0, i32 0, i32 1 + %2 = load %Qubit*, %Qubit** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Qubit*, %Qubit* }* + %8 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %7, i32 0, i32 1 + store %Qubit* %2, %Qubit** %8, align 8 + store %Qubit* %5, 
%Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Qubit* }* + %1 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %0, i32 0, i32 1 + %2 = load %Qubit*, %Qubit** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Qubit*, %Qubit* }* + %8 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %7, i32 0, i32 1 + store %Qubit* %2, %Qubit** %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Qubit* }* + %6 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 1 + %7 = load %Qubit*, %Qubit** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Qubit*, %Qubit* }* + %10 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %9, i32 0, i32 1 + store %Qubit* %7, %Qubit** %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Qubit*, %Qubit* }* }* getelementptr ({ %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, %Qubit* }* }* + %14 = getelementptr inbounds { 
%Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Qubit*, %Qubit* }* %9, { %Qubit*, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Qubit* }* + %6 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 1 + %7 = load %Qubit*, %Qubit** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Qubit*, %Qubit* }* + %10 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %9, i32 0, i32 1 + store %Qubit* %7, %Qubit** %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Qubit*, %Qubit* }* }* getelementptr ({ %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Qubit*, %Qubit* }* %9, { %Qubit*, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define void @Microsoft__Quantum__Samples__BitFlipCode__EncodeIntoBitFlipCode__ctladj(%Array* %__controlQubits__, { %Qubit*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Array* }, { %Qubit*, %Array* }* %0, i32 0, i32 0 + %data = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Array* }, { %Qubit*, %Array* }* %0, i32 0, i32 1 + %auxiliaryQubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliaryQubits, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__CNOT__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Qubit* }, { %Callable*, %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Callable*, %Qubit* }* + %6 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 1 + store %Callable* %3, %Callable** %6, align 8 + store %Qubit* %data, %Qubit** %7, align 8 + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__10__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %4) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliaryQubits, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Callable*, %Array* }* + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %10, i32 0, i32 1 + store %Callable* %8, %Callable** %11, align 8 + store %Array* %auxiliaryQubits, %Array** %12, align 8 + call void @Microsoft__Quantum__Canon___d5999d2381314c67a04b2c24f6c12486_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Array* }* %10) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliaryQubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliaryQubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___d5999d2381314c67a04b2c24f6c12486_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %3 = call %Range @Microsoft__Quantum__Arrays___091380f3b5d14dd89cf85a46bbbe3f40_IndexRange__body(%Array* %register) + %4 = extractvalue %Range %3, 0 + %5 = extractvalue %Range %3, 1 + %6 = extractvalue %Range %3, 2 + %7 = sub i64 %6, %4 + %8 = sdiv i64 %7, %5 + %9 = mul i64 %5, %8 + %10 = add i64 %4, %9 + %11 = sub i64 0, %5 + %12 = insertvalue %Range zeroinitializer, i64 %10, 0 + %13 = insertvalue %Range %12, i64 %11, 1 + %14 = insertvalue %Range %13, i64 %4, 2 + %15 = extractvalue %Range %14, 0 + %16 = extractvalue %Range %14, 1 + %17 = extractvalue %Range %14, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %18 = icmp sgt i64 %16, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %15, %preheader__1 ], [ %30, %exiting__1 ] + %19 = icmp sle i64 %__qsVar0__idxQubit__, %17 + %20 = icmp sge i64 %__qsVar0__idxQubit__, %17 + %21 = select i1 %18, i1 %19, i1 %20 + br i1 %21, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %24 = bitcast i8* %23 to %Qubit** + %25 = load %Qubit*, %Qubit** %24, align 8 + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Array*, %Qubit* }* + %28 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %27, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %28, align 8 + store %Qubit* %25, %Qubit** %29, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %26, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %30 = add i64 %__qsVar0__idxQubit__, %16 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void 
@Lifted__PartialApplication__10__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Qubit* }* + %1 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %0, i32 0, i32 1 + %2 = load %Qubit*, %Qubit** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Qubit*, %Qubit* }* + %8 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %7, i32 0, i32 1 + store %Qubit* %2, %Qubit** %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Qubit* }* + %1 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %0, i32 0, i32 1 + %2 = load %Qubit*, %Qubit** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Qubit*, %Qubit* }* + %8 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %7, i32 0, i32 1 + store %Qubit* %2, %Qubit** %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { 
%Callable*, %Qubit* }* + %6 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 1 + %7 = load %Qubit*, %Qubit** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Qubit*, %Qubit* }* + %10 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %9, i32 0, i32 1 + store %Qubit* %7, %Qubit** %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Qubit*, %Qubit* }* }* getelementptr ({ %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Qubit*, %Qubit* }* %9, { %Qubit*, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Qubit* }* + %6 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 1 + %7 = load %Qubit*, %Qubit** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Qubit*, %Qubit* }* + %10 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %9, i32 0, i32 1 + store %Qubit* %7, %Qubit** %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Qubit*, %Qubit* }* }* getelementptr ({ %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Qubit*, %Qubit* }* }* + %14 = getelementptr 
inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Qubit*, %Qubit* }* %9, { %Qubit*, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Qubit* }, { %Callable*, %Qubit* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define void @BitFlipCode__Program__body() { +entry: + call void @Microsoft__Quantum__Samples__BitFlipCode__CheckBitFlipCodeStateParity__body() + %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([34 x i8], [34 x i8]* @3, i32 0, i32 0)) + call void @__quantum__rt__message(%String* %0) + call void @Microsoft__Quantum__Samples__BitFlipCode__CheckBitFlipCodeCorrectsBitFlipErrors__body() + %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @4, i32 0, i32 0)) + call void @__quantum__rt__message(%String* %1) + call void @Microsoft__Quantum__Samples__BitFlipCode__CheckCanonBitFlipCodeCorrectsBitFlipErrors__body() + %2 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([50 x i8], [50 x i8]* @4, i32 0, i32 0)) + call void @__quantum__rt__message(%String* %2) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) + +define internal i64 @Microsoft__Quantum__Convert__BoolArrayAsInt__body(%Array* %bits) { +entry: + %number = alloca i64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %nBits = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %0 = icmp slt i64 %nBits, 64 + %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @5, i32 0, i32 0)) + %2 = call %String* @__quantum__rt__int_to_string(i64 %nBits) + %3 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %2) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + %4 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @6, i32 0, i32 0)) + %5 = call %String* @__quantum__rt__string_concatenate(%String* %3, %String* %4) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %4, i32 -1) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %0, %String* %5) + store i64 0, i64* 
%number, align 4 + %6 = sub i64 %nBits, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxBit = phi i64 [ 0, %entry ], [ %16, %exiting__1 ] + %7 = icmp sle i64 %idxBit, %6 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bits, i64 %idxBit) + %9 = bitcast i8* %8 to i1* + %10 = load i1, i1* %9, align 1 + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %11 = load i64, i64* %number, align 4 + %12 = trunc i64 %idxBit to i32 + %13 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %12) + %14 = fptosi double %13 to i64 + %15 = add i64 %11, %14 + store i64 %15, i64* %number, align 4 + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %16 = add i64 %idxBit, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %17 = load i64, i64* %number, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %5, i32 -1) + ret i64 %17 +} + +define internal void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %actual, %String* %message) { +entry: + %0 = xor i1 %actual, true + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__string_update_reference_count(%String* %message, i32 1) + call void @__quantum__rt__fail(%String* %message) + unreachable + +continue__1: ; preds = %entry + ret void +} + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) + +declare %String* @__quantum__rt__int_to_string(i64) + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare double @llvm.powi.f64.i32(double, i32) #0 + +define internal %Array* @Microsoft__Quantum__Convert__ResultArrayAsBoolArray__body(%Array* %input) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Convert__ResultAsBool__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___f90ef0ea024d4f09ab6faad2b9e5b5ec_Mapped__body(%Callable* %0, %Array* %input) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + ret %Array* %1 +} + +define internal %Array* @Microsoft__Quantum__Arrays___f90ef0ea024d4f09ab6faad2b9e5b5ec_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = icmp eq i64 %length, 0 + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %1 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %1 + +continue__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %3 = bitcast i8* %2 to %Result** + %4 = load %Result*, %Result** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Result* }* getelementptr ({ %Result* }, { %Result* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Result* }* + %7 = getelementptr inbounds { %Result* }, { %Result* }* %6, i32 0, i32 0 + store %Result* %4, %Result** %7, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1 }* getelementptr ({ i1 }, { i1 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %5, %Tuple* %8) + %9 = bitcast %Tuple* %8 to { i1 }* + %10 = getelementptr inbounds { i1 }, { i1 }* %9, i32 0, i32 0 + %first = load i1, i1* %10, align 1 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %length) + %12 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %13 = phi i64 [ 0, %continue__1 ], [ %17, %exiting__1 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %11, i64 %13) + %16 = bitcast i8* %15 to i1* + store i1 %first, i1* %16, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %13, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %11, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %18 = sub i64 %length, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idx = phi i64 [ 1, %exit__1 ], [ %35, %exiting__2 ] + %19 = icmp sle i64 %idx, %18 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1) + %21 = call %Array* @__quantum__rt__array_copy(%Array* %20, i1 false) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %23 = bitcast i8* %22 to %Result** + %24 = load %Result*, %Result** %23, align 8 + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Result* }* getelementptr ({ %Result* }, { %Result* }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { %Result* }* + %27 = getelementptr inbounds { %Result* }, { %Result* }* %26, i32 0, i32 0 + store %Result* %24, %Result** %27, align 8 + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1 }* getelementptr ({ i1 }, { i1 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %25, %Tuple* %28) + %29 = bitcast %Tuple* %28 to { i1 }* + %30 = getelementptr inbounds { i1 }, { i1 }* %29, i32 0, i32 0 + %31 = load i1, i1* %30, align 1 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %idx) + %33 = bitcast i8* %32 to i1* + %34 = load i1, i1* %33, align 1 + store i1 %31, i1* %33, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 1) + store %Array* %21, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %35 = add i64 
%idx, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %36 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %36, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret %Array* %36 +} + +define internal void @Microsoft__Quantum__Convert__ResultAsBool__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Result* }* + %1 = getelementptr inbounds { %Result* }, { %Result* }* %0, i32 0, i32 0 + %2 = load %Result*, %Result** %1, align 8 + %3 = call i1 @Microsoft__Quantum__Convert__ResultAsBool__body(%Result* %2) + %4 = bitcast %Tuple* %result-tuple to { i1 }* + %5 = getelementptr inbounds { i1 }, { i1 }* %4, i32 0, i32 0 + store i1 %3, i1* %5, align 1 + ret void +} + +define internal i1 @Microsoft__Quantum__Convert__ResultAsBool__body(%Result* %input) { +entry: + %0 = call %Result* @__quantum__rt__result_get_zero() + %1 = call i1 @__quantum__rt__result_equal(%Result* %input, %Result* %0) + %2 = select i1 %1, i1 false, i1 true + ret i1 %2 +} + +define internal i64 @Microsoft__Quantum__Convert__ResultArrayAsInt__body(%Array* %results) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %results, i32 1) + %0 = call %Array* @Microsoft__Quantum__Convert__ResultArrayAsBoolArray__body(%Array* %results) + %1 = call i64 @Microsoft__Quantum__Convert__BoolArrayAsInt__body(%Array* %0) + call void @__quantum__rt__array_update_alias_count(%Array* %results, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + ret i64 %1 +} + +declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__h__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__h__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__h__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__h__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { +entry: + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 -2, i2* %1, align 1 + call void 
@__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %3 = bitcast i8* %2 to %Qubit** + store %Qubit* %qubit, %Qubit** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %4 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + ret %Result* %4 +} + +define internal %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret %Result* %0 +} + +define internal void @Microsoft__Quantum__Intrinsic__R__body(i2 %pauli, double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__r__body(i2 %pauli, double %theta, %Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__r__body(i2, double, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__R__adj(i2 %pauli, double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__r__adj(i2 %pauli, double %theta, %Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__r__adj(i2, double, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__R__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 2 + %qubit = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { i2, double, %Qubit* }* + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 2 + store i2 %pauli, i2* %6, align 1 + store double %theta, double* %7, align 8 + store %Qubit* %qubit, %Qubit** %8, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +declare void @__quantum__qis__r__ctl(%Array*, { i2, double, %Qubit* }*) + +define internal void 
@Microsoft__Quantum__Intrinsic__R__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 2 + %qubit = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { i2, double, %Qubit* }* + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 2 + store i2 %pauli, i2* %6, align 1 + store double %theta, double* %7, align 8 + store %Qubit* %qubit, %Qubit** %8, align 8 + call void @__quantum__qis__r__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +declare void @__quantum__qis__r__ctladj(%Array*, { i2, double, %Qubit* }*) + +define internal void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i2, double, %Qubit* }* + %5 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 2 + store i2 1, i2* %5, align 1 + store double %theta, double* %6, align 8 + store %Qubit* %qubit, %Qubit** %7, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr 
inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %theta__1 = fneg double %theta + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i2, double, %Qubit* }* + %5 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 2 + store i2 1, i2* %5, align 1 + store double %theta__1, double* %6, align 8 + store %Qubit* %qubit, %Qubit** %7, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__y__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__y__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__y__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__y__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__z__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__z__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__body(i2 %pauli, %Qubit* %target) { +entry: + %0 = icmp eq i2 %pauli, 1 + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +test1__1: ; preds = %entry + %1 = icmp eq i2 %pauli, -1 + br i1 %1, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__qis__y__body(%Qubit* %target) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %2 = icmp eq i2 %pauli, -2 + br i1 %2, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__qis__z__body(%Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__adj(i2 %pauli, %Qubit* %target) { +entry: + %0 = icmp eq i2 %pauli, 1 + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +test1__1: ; preds = %entry + %1 = icmp eq i2 %pauli, -1 + br i1 %1, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__qis__y__body(%Qubit* %target) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %2 = icmp eq i2 %pauli, -2 + br i1 %2, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__qis__z__body(%Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__ctl(%Array* %__controlQubits__, { i2, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = icmp eq i2 %pauli, 1 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %4 = icmp eq i2 %pauli, -1 + br i1 %4, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %5 = icmp eq i2 %pauli, -2 + br i1 %5, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__ctladj(%Array* %__controlQubits__, { i2, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = icmp eq i2 %pauli, 1 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %4 = icmp eq i2 %pauli, -1 + br i1 %4, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %5 = icmp eq i2 %pauli, -2 + br i1 %5, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___7b778b591d0f4b348622943561272052_ApplyToEachCA__body(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %register) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + 
%3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %2) + %5 = bitcast i8* %4 to { i2, %Qubit* }** + %6 = load { i2, %Qubit* }*, { i2, %Qubit* }** %5, align 8 + %7 = bitcast { i2, %Qubit* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %9 = call %Range @Microsoft__Quantum__Arrays___acdab0e54f3e48e19265054d194b19e3_IndexRange__body(%Array* %register) + %10 = extractvalue %Range %9, 0 + %11 = extractvalue %Range %9, 1 + %12 = extractvalue %Range %9, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %13 = icmp sgt i64 %11, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idxQubit = phi i64 [ %10, %preheader__1 ], [ %21, %exiting__2 ] + %14 = icmp sle i64 %idxQubit, %12 + %15 = icmp sge i64 %idxQubit, %12 + %16 = select i1 %13, i1 %14, i1 %15 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %18 = bitcast i8* %17 to { i2, %Qubit* }** + %19 = load { i2, %Qubit* }*, { i2, %Qubit* }** %18, align 8 + %20 = bitcast { i2, %Qubit* }* %19 to %Tuple* + call void @__quantum__rt__callable_invoke(%Callable* %singleElementOperation, %Tuple* %20, %Tuple* null) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %idxQubit, %11 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + %22 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %23 = phi i64 [ 0, %exit__2 ], [ %29, %exiting__3 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %23) + %26 = bitcast i8* %25 to { i2, %Qubit* }** + %27 = load { i2, %Qubit* }*, { i2, %Qubit* }** %26, align 8 + %28 = bitcast { i2, %Qubit* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %29 = add i64 %23, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i2, %Qubit* }* + %1 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 1 + %3 = load i2, i2* %1, align 1 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__body(i2 %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i2, %Qubit* }* + %1 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 
0 + %2 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 1 + %3 = load i2, i2* %1, align 1 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__adj(i2 %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i2, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i2, %Qubit* }*, { i2, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__ctl(%Array* %3, { i2, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i2, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i2, %Qubit* }*, { i2, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__ctladj(%Array* %3, { i2, %Qubit* }* %4) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___4c71f94c9bd54353b668964b77a208ce_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %2 = icmp slt i64 %0, %1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = icmp eq i64 %nElements, 0 + br i1 %3, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %4 + +continue__1: ; preds = %condContinue__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %6 = bitcast i8* %5 to i2* + %7 = load i2, i2* %6, align 1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, %Qubit* }* getelementptr ({ i2, %Qubit* }, { i2, %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i2, %Qubit* }* + %13 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %12, i32 0, i32 1 + store i2 %7, i2* %13, align 1 + store %Qubit* %10, %Qubit** %14, align 8 + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %16 = sub i64 %nElements, 1 
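+ ; seed loop below: store the first zipped { i2, %Qubit* } tuple into every slot of the new array (bumping its reference count per copy); slots 1..nElements-1 are then overwritten with fresh tuples in the later loop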
+ br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %17 = phi i64 [ 0, %continue__1 ], [ %21, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %17) + %20 = bitcast i8* %19 to { i2, %Qubit* }** + store { i2, %Qubit* }* %12, { i2, %Qubit* }** %20, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %15, %Array** %output, align 8 + %22 = sub i64 %nElements, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %23) + %26 = bitcast i8* %25 to { i2, %Qubit* }** + %27 = load { i2, %Qubit* }*, { i2, %Qubit* }** %26, align 8 + %28 = bitcast { i2, %Qubit* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %30 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %48, %exiting__3 ] + %31 = icmp sle i64 %idxElement, %30 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %32 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %35 = bitcast i8* %34 to i2* + %36 = load i2, i2* %35, align 1 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %38 = bitcast i8* %37 to %Qubit** + %39 = load %Qubit*, %Qubit** %38, align 8 + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, %Qubit* }* getelementptr ({ i2, %Qubit* }, { i2, %Qubit* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i2, %Qubit* }* + %42 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %41, i32 0, i32 1 + store i2 %36, i2* %42, align 1 + store %Qubit* %39, %Qubit** %43, align 8 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxElement) + %45 = bitcast i8* %44 to { i2, %Qubit* }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %46 = load { i2, %Qubit* }*, { i2, %Qubit* }** %45, align 8 + %47 = bitcast { i2, %Qubit* }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { i2, %Qubit* }* %41, { i2, %Qubit* }** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = 
%header__3 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { i2, %Qubit* }** + %56 = load { i2, %Qubit* }*, { i2, %Qubit* }** %55, align 8 + %57 = bitcast { i2, %Qubit* }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret %Array* %49 +} + +define internal void @Microsoft__Quantum__Canon___7b778b591d0f4b348622943561272052_ApplyToEachCA__adj(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %register) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %2) + %5 = bitcast i8* %4 to { i2, %Qubit* }** + %6 = load { i2, %Qubit* }*, { i2, %Qubit* }** %5, align 8 + %7 = bitcast { i2, %Qubit* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %9 = call %Range @Microsoft__Quantum__Arrays___acdab0e54f3e48e19265054d194b19e3_IndexRange__body(%Array* %register) + %10 = extractvalue %Range %9, 0 + %11 = extractvalue %Range %9, 1 + %12 = extractvalue %Range %9, 2 + %13 = sub i64 %12, %10 + %14 = sdiv i64 %13, %11 + %15 = mul i64 %11, %14 + %16 = add i64 %10, %15 + %17 = sub i64 0, %11 + %18 = insertvalue %Range zeroinitializer, i64 %16, 0 + %19 = insertvalue %Range %18, i64 %17, 1 + %20 = insertvalue %Range %19, i64 %10, 2 + %21 = extractvalue %Range %20, 0 + %22 = extractvalue %Range %20, 1 + %23 = extractvalue %Range %20, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %24 = icmp sgt i64 %22, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %21, %preheader__1 ], [ %33, %exiting__2 ] + %25 = icmp sle i64 %__qsVar0__idxQubit__, %23 + %26 = icmp sge i64 %__qsVar0__idxQubit__, %23 + %27 = select i1 %24, i1 %25, i1 %26 + br i1 %27, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %28 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %28, 
i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %28) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %30 = bitcast i8* %29 to { i2, %Qubit* }** + %31 = load { i2, %Qubit* }*, { i2, %Qubit* }** %30, align 8 + %32 = bitcast { i2, %Qubit* }* %31 to %Tuple* + call void @__quantum__rt__callable_invoke(%Callable* %28, %Tuple* %32, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %28, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %33 = add i64 %__qsVar0__idxQubit__, %22 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + %34 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %41, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %35) + %38 = bitcast i8* %37 to { i2, %Qubit* }** + %39 = load { i2, %Qubit* }*, { i2, %Qubit* }** %38, align 8 + %40 = bitcast { i2, %Qubit* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %41 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___7b778b591d0f4b348622943561272052_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %register) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %5) + %8 = bitcast i8* %7 to { i2, %Qubit* }** + %9 = load { i2, %Qubit* }*, { i2, %Qubit* }** %8, align 8 + %10 = bitcast { i2, %Qubit* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %12 = call %Range @Microsoft__Quantum__Arrays___acdab0e54f3e48e19265054d194b19e3_IndexRange__body(%Array* %register) + %13 = extractvalue %Range %12, 0 + %14 = extractvalue %Range %12, 1 + %15 = extractvalue %Range 
%12, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %16 = icmp sgt i64 %14, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idxQubit = phi i64 [ %13, %preheader__1 ], [ %29, %exiting__2 ] + %17 = icmp sle i64 %idxQubit, %15 + %18 = icmp sge i64 %idxQubit, %15 + %19 = select i1 %16, i1 %17, i1 %18 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %20, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %20) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %22 = bitcast i8* %21 to { i2, %Qubit* }** + %23 = load { i2, %Qubit* }*, { i2, %Qubit* }** %22, align 8 + %24 = bitcast { i2, %Qubit* }* %23 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 1) + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i2, %Qubit* }* }* getelementptr ({ %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { %Array*, { i2, %Qubit* }* }* + %27 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %26, i32 0, i32 0 + %28 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %26, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %27, align 8 + store { i2, %Qubit* }* %23, { i2, %Qubit* }** %28, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %25, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %20, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %20, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %idxQubit, %14 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + %30 = sub i64 %3, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %31 = phi i64 [ 0, %exit__2 ], [ %37, %exiting__3 ] + %32 = icmp sle i64 %31, %30 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %31) + %34 = bitcast i8* %33 to { i2, %Qubit* }** + %35 = load { i2, %Qubit* }*, { i2, %Qubit* }** %34, align 8 + %36 = bitcast { i2, %Qubit* }* %35 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %36, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %37 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___7b778b591d0f4b348622943561272052_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { 
%Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %register) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %5) + %8 = bitcast i8* %7 to { i2, %Qubit* }** + %9 = load { i2, %Qubit* }*, { i2, %Qubit* }** %8, align 8 + %10 = bitcast { i2, %Qubit* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %12 = call %Range @Microsoft__Quantum__Arrays___acdab0e54f3e48e19265054d194b19e3_IndexRange__body(%Array* %register) + %13 = extractvalue %Range %12, 0 + %14 = extractvalue %Range %12, 1 + %15 = extractvalue %Range %12, 2 + %16 = sub i64 %15, %13 + %17 = sdiv i64 %16, %14 + %18 = mul i64 %14, %17 + %19 = add i64 %13, %18 + %20 = sub i64 0, %14 + %21 = insertvalue %Range zeroinitializer, i64 %19, 0 + %22 = insertvalue %Range %21, i64 %20, 1 + %23 = insertvalue %Range %22, i64 %13, 2 + %24 = extractvalue %Range %23, 0 + %25 = extractvalue %Range %23, 1 + %26 = extractvalue %Range %23, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %27 = icmp sgt i64 %25, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %24, %preheader__1 ], [ %40, %exiting__2 ] + %28 = icmp sle i64 %__qsVar0__idxQubit__, %26 + %29 = icmp sge i64 %__qsVar0__idxQubit__, %26 + %30 = select i1 %27, i1 %28, i1 %29 + br i1 %30, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %31 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %31) + call void @__quantum__rt__callable_make_controlled(%Callable* %31) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %33 = bitcast i8* %32 to { i2, %Qubit* }** + %34 = load { i2, %Qubit* }*, { i2, %Qubit* }** %33, align 8 + %35 = bitcast { i2, %Qubit* }* %34 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i2, %Qubit* }* }* getelementptr ({ %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { %Array*, { i2, %Qubit* }* }* + %38 = getelementptr inbounds { %Array*, { 
i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %37, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %38, align 8 + store { i2, %Qubit* }* %34, { i2, %Qubit* }** %39, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %31, %Tuple* %36, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %40 = add i64 %__qsVar0__idxQubit__, %25 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + %41 = sub i64 %3, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %42 = phi i64 [ 0, %exit__2 ], [ %48, %exiting__3 ] + %43 = icmp sle i64 %42, %41 + br i1 %43, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %42) + %45 = bitcast i8* %44 to { i2, %Qubit* }** + %46 = load { i2, %Qubit* }*, { i2, %Qubit* }** %45, align 8 + %47 = bitcast { i2, %Qubit* }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %42, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal %Range @Microsoft__Quantum__Arrays___091380f3b5d14dd89cf85a46bbbe3f40_IndexRange__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %2 +} + +define internal %Range @Microsoft__Quantum__Arrays___acdab0e54f3e48e19265054d194b19e3_IndexRange__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { i2, %Qubit* }** + %6 = load { i2, %Qubit* }*, { i2, %Qubit* }** %5, align 8 + %7 = bitcast { i2, %Qubit* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %9 = sub i64 %0, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %11 = sub i64 %0, 1 + br label 
%header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %12) + %15 = bitcast i8* %14 to { i2, %Qubit* }** + %16 = load { i2, %Qubit* }*, { i2, %Qubit* }** %15, align 8 + %17 = bitcast { i2, %Qubit* }* %16 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %18 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %10 +} + +declare void @__quantum__qis__assertmeasurementprobability__body(%Array*, %Array*, %Result*, double, %String*, double) + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__adj(%Array* %bases, %Array* %qubits, %Result* %result, %String* %msg) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %bases, %Array* %qubits, %Result* %result, %String* %msg) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctl(%Array* %controllingQubits, { %Array*, %Array*, %Result*, %String* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controllingQubits, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 3 + %msg = load %String*, %String** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controllingQubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctladj(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, 
align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 3 + %msg = load %String*, %String** %4, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, %String* }* getelementptr ({ %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Array*, %Array*, %Result*, %String* }* + %7 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 2 + %10 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 3 + store %Array* %bases, %Array** %7, align 8 + store %Array* %qubits, %Array** %8, align 8 + store %Result* %result, %Result** %9, align 8 + store %String* %msg, %String** %10, align 8 + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %6) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__body(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__adj(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 3 + %prob = load double, double* %4, align 8 + %5 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 4 + %msg = load %String*, %String** %5, align 8 + %6 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 5 + %tolerance = load double, double* %6, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, double, %String*, double }* getelementptr ({ %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array*, %Result*, double, %String*, double }* + %9 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 2 + %12 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 3 + %13 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, 
%String*, double }* %8, i32 0, i32 4 + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 5 + store %Array* %bases, %Array** %9, align 8 + store %Array* %qubits, %Array** %10, align 8 + store %Result* %result, %Result** %11, align 8 + store double %prob, double* %12, align 8 + store %String* %msg, %String** %13, align 8 + store double %tolerance, double* %14, align 8 + call void @__quantum__qis__assertmeasurementprobability__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %8) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +declare void @__quantum__qis__assertmeasurementprobability__ctl(%Array*, { %Array*, %Array*, %Result*, double, %String*, double }*) + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__ctladj(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 3 + %prob = load double, double* %4, align 8 + %5 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 4 + %msg = load %String*, %String** %5, align 8 + %6 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 5 + %tolerance = load double, double* %6, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, double, 
%String*, double }* getelementptr ({ %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array*, %Result*, double, %String*, double }* + %9 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 2 + %12 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 3 + %13 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 4 + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 5 + store %Array* %bases, %Array** %9, align 8 + store %Array* %qubits, %Array** %10, align 8 + store %Result* %result, %Result** %11, align 8 + store double %prob, double* %12, align 8 + store %String* %msg, %String** %13, align 8 + store double %tolerance, double* %14, align 8 + call void @__quantum__qis__assertmeasurementprobability__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %8) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +declare void @__quantum__rt__fail(%String*) + +declare %Array* @__quantum__rt__array_copy(%Array*, i1) + +define internal %Array* @Microsoft__Quantum__Arrays___5594a8cc99714c238e5799c8b11d47dd_ForEach__body(%Callable* %action, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to %Array** + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %7 = 
icmp eq i64 %length, 0 + br i1 %7, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 -1) + %9 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %11 = bitcast i8* %10 to %Array** + %12 = load %Array*, %Array** %11, align 8 + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Array* }* + %15 = getelementptr inbounds { %Array* }, { %Array* }* %14, i32 0, i32 0 + store %Array* %12, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Result* }* getelementptr ({ %Result* }, { %Result* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %action, %Tuple* %13, %Tuple* %16) + %17 = bitcast %Tuple* %16 to { %Result* }* + %18 = getelementptr inbounds { %Result* }, { %Result* }* %17, i32 0, i32 0 + %first = load %Result*, %Result** %18, align 8 + %19 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %20 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %21 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__2 ] + %22 = icmp sle i64 %21, %9 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %21) + %24 = bitcast i8* %23 to %Array** + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %8 + +header__3: ; preds = %exiting__3, %continue__1 + %27 = phi i64 [ 0, %continue__1 ], [ %31, %exiting__3 ] + %28 = icmp sle i64 %27, %20 + br i1 %28, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 %27) + %30 = bitcast i8* %29 to %Result** + store %Result* %first, %Result** %30, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %first, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %31 = add i64 %27, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %19, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 1) + %32 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %idx = phi i64 [ 1, %exit__3 ], [ %49, %exiting__4 ] + %33 = icmp sle i64 %idx, %32 + br i1 %33, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %34 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + %35 = call %Array* @__quantum__rt__array_copy(%Array* %34, i1 false) + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %37 = bitcast i8* %36 to %Array** + %38 = load %Array*, %Array** %37, align 8 + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) 
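+ ; wrap the current element in an argument tuple, invoke the action, and write the returned %Result into the copy-on-write output array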
+ %40 = bitcast %Tuple* %39 to { %Array* }* + %41 = getelementptr inbounds { %Array* }, { %Array* }* %40, i32 0, i32 0 + store %Array* %38, %Array** %41, align 8 + %42 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Result* }* getelementptr ({ %Result* }, { %Result* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %action, %Tuple* %39, %Tuple* %42) + %43 = bitcast %Tuple* %42 to { %Result* }* + %44 = getelementptr inbounds { %Result* }, { %Result* }* %43, i32 0, i32 0 + %45 = load %Result*, %Result** %44, align 8 + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %35, i64 %idx) + %47 = bitcast i8* %46 to %Result** + call void @__quantum__rt__result_update_reference_count(%Result* %45, i32 1) + %48 = load %Result*, %Result** %47, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %48, i32 -1) + store %Result* %45, %Result** %47, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %35, i32 1) + store %Array* %35, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %39, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %45, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %49 = add i64 %idx, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + %50 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 -1) + %51 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %52 = phi i64 [ 0, %exit__4 ], [ %57, %exiting__5 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %52) + %55 = bitcast i8* %54 to %Array** + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %57 = add i64 %52, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %first, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret %Array* %50 +} + +define internal %Range @Microsoft__Quantum__Arrays___dd1899b05ba64a609aacbcacbdbdf453_IndexRange__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %2 +} + +define internal %Array* @Microsoft__Quantum__Measurement__MeasurePaulis__body(%Array* %paulis, %Array* %target, %Callable* %gadget) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %paulis) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 
] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %gadget, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %gadget, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %gadget, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %gadget, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Array* }* + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 1 + store %Callable* %gadget, %Callable** %10, align 8 + store %Array* %target, %Array** %11, align 8 + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__11__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__3__FunctionTable, %Tuple* %8) + %13 = call %Array* @Microsoft__Quantum__Arrays___5594a8cc99714c238e5799c8b11d47dd_ForEach__body(%Callable* %12, %Array* %paulis) + %14 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %20, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %15) + %18 = bitcast i8* %17 to %Array** + %19 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %20 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %gadget, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %gadget, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret %Array* %13 +} + +define internal void @Lifted__PartialApplication__11__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array* }* + %1 = getelementptr inbounds { %Array* }, { %Array* }* %0, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %4 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %3, i32 0, i32 1 + %5 = load %Array*, %Array** 
%4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @MemoryManagement__3__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__3__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Result* @Microsoft__Quantum__Measurement__MeasureWithScratch__body(%Array* %pauli, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %scratch = call %Qubit* @__quantum__rt__qubit_allocate() + call void @__quantum__qis__h__body(%Qubit* %scratch) + %0 = call %Range @Microsoft__Quantum__Arrays___dd1899b05ba64a609aacbcacbdbdf453_IndexRange__body(%Array* %pauli) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %4 = icmp sgt i64 %2, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxPauli = phi i64 [ %1, %preheader__1 ], [ %21, %exiting__1 ] + %5 = icmp sle i64 %idxPauli, %3 + %6 = icmp sge i64 %idxPauli, %3 + %7 = select i1 %4, i1 %5, i1 %6 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %pauli, i64 %idxPauli) + %9 = bitcast i8* %8 to i2* + %P = load i2, i2* %9, align 1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %target, i64 %idxPauli) + %11 = bitcast i8* %10 to %Qubit** + %src = load %Qubit*, %Qubit** %11, align 8 + %12 = icmp eq i2 %P, 1 + br i1 %12, label %then0__1, label %test1__1 + +then0__1: ; preds = %body__1 + %__controlQubits__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__controlQubits__, i64 0) + %14 = bitcast i8* %13 to %Qubit** + store %Qubit* %scratch, %Qubit** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %src) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test1__1: ; preds = %body__1 + %15 = icmp eq i2 %P, -1 + br i1 %15, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %__controlQubits__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__controlQubits__1, i64 0) + %17 = bitcast i8* %16 to %Qubit** + store %Qubit* %scratch, %Qubit** %17, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__1, %Qubit* %src) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %18 = icmp eq i2 %P, -2 + br i1 %18, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + %__controlQubits__2 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__controlQubits__2, i64 0) + %20 = bitcast i8* %19 to %Qubit** + store %Qubit* %scratch, %Qubit** %20, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__2, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__2, %Qubit* %src) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__2, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__2, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %21 = add i64 %idxPauli, %2 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__qis__h__body(%Qubit* %scratch) + %22 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %scratch) + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %scratch) + ret %Result* %22 +} + +define internal %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) { +entry: + %result = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %target) + %0 = call %Result* @__quantum__rt__result_get_one() + %1 = call i1 @__quantum__rt__result_equal(%Result* %result, %Result* %0) + br i1 %1, label %then0__1, label %continue__1 + 
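+  ; If the measurement above yielded One, the then-block applies X so that the qubit is always left in |0> on exit (the measure-and-reset idiom).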
+then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret %Result* %result +} + +define internal void @Microsoft__Quantum__ErrorCorrection____QsRef1__ApplyBitFlipEncoder____body(i1 %coherentRecovery, %Array* %data, %Array* %scratch) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %scratch, i32 1) + br i1 %coherentRecovery, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %scratch, i32 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %qubit = load %Qubit*, %Qubit** %1, align 8 + call void @__quantum__qis__x__ctl(%Array* %scratch, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %scratch, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %scratch, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %qubit__1 = load %Qubit*, %Qubit** %3, align 8 + call void @__quantum__qis__x__ctl(%Array* %data, %Qubit* %qubit__1) + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %scratch, i64 1) + %5 = bitcast i8* %4 to %Qubit** + %qubit__2 = load %Qubit*, %Qubit** %5, align 8 + call void @__quantum__qis__x__ctl(%Array* %data, %Qubit* %qubit__2) + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %scratch, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ErrorCorrection____QsRef1__ApplyBitFlipEncoder____adj(i1 %coherentRecovery, %Array* %data, %Array* %scratch) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %scratch, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %scratch, i64 1) + %1 = bitcast i8* %0 to %Qubit** + %qubit = load %Qubit*, %Qubit** %1, align 8 + call void @__quantum__qis__x__ctl(%Array* %data, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %scratch, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %qubit__1 = load %Qubit*, %Qubit** %3, align 8 + call void @__quantum__qis__x__ctl(%Array* %data, %Qubit* %qubit__1) + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + br i1 %coherentRecovery, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %scratch, i32 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 0) + %5 = bitcast i8* %4 to %Qubit** + %qubit__2 = load %Qubit*, %Qubit** %5, align 8 + call void @__quantum__qis__x__ctl(%Array* %scratch, %Qubit* %qubit__2) + call void @__quantum__rt__array_update_alias_count(%Array* %scratch, i32 -1) + br 
label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %scratch, i32 -1) + ret void +} + +define internal { %Callable* }* @Microsoft__Quantum__ErrorCorrection__EncodeOp__body(%Callable* %__Item1__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable* }* + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + store %Callable* %__Item1__, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 -1) + ret { %Callable* }* %1 +} + +define internal void @Microsoft__Quantum__ErrorCorrection__EncodeIntoBitFlipCode__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = call { %Array* }* @Microsoft__Quantum__ErrorCorrection__EncodeIntoBitFlipCode__body(%Array* %3, %Array* %4) + %6 = bitcast %Tuple* %result-tuple to { { %Array* }* }* + %7 = getelementptr inbounds { { %Array* }* }, { { %Array* }* }* %6, i32 0, i32 0 + store { %Array* }* %5, { %Array* }** %7, align 8 + ret void +} + +define internal { %Callable* }* @Microsoft__Quantum__ErrorCorrection__DecodeOp__body(%Callable* %__Item1__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable* }* + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + store %Callable* %__Item1__, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 -1) + ret { %Callable* }* %1 +} + +define internal void @Microsoft__Quantum__ErrorCorrection__DecodeFromBitFlipCode__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array* }* + %1 = call { %Array*, %Array* }* @Microsoft__Quantum__ErrorCorrection__DecodeFromBitFlipCode__body({ %Array* }* %0) + %2 = bitcast %Tuple* %result-tuple to { %Array*, %Array* }* + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %2, i32 0, i32 0 + %4 = 
getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + store %Array* %6, %Array** %3, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %8 = load %Array*, %Array** %7, align 8 + store %Array* %8, %Array** %4, align 8 + %9 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret void +} + +define internal { %Callable* }* @Microsoft__Quantum__ErrorCorrection__SyndromeMeasOp__body(%Callable* %__Item1__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable* }* + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + store %Callable* %__Item1__, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 -1) + ret { %Callable* }* %1 +} + +define internal void @Lifted__PartialApplication__12__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }*, %Callable* }* getelementptr ({ %Array*, { %Array* }*, %Callable* }, { %Array*, { %Array* }*, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Array*, { %Array* }*, %Callable* }* + %7 = getelementptr inbounds { %Array*, { %Array* }*, %Callable* }, { %Array*, { %Array* }*, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Array*, { %Array* }*, %Callable* }, { %Array*, { %Array* }*, %Callable* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Array*, { %Array* }*, %Callable* }, { %Array*, { %Array* }*, %Callable* }* %6, i32 0, i32 2 + store %Array* %2, %Array** %7, align 8 + %10 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %10, { %Array* }** %8, align 8 + store %Callable* %4, %Callable** %9, align 8 + %11 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %12 = load %Callable*, %Callable** %11, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %5, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ErrorCorrection__MeasureStabilizerGenerators__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* 
%arg-tuple to { %Array*, { %Array* }*, %Callable* }* + %1 = getelementptr inbounds { %Array*, { %Array* }*, %Callable* }, { %Array*, { %Array* }*, %Callable* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }*, %Callable* }, { %Array*, { %Array* }*, %Callable* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, { %Array* }*, %Callable* }, { %Array*, { %Array* }*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %1, align 8 + %5 = load { %Array* }*, { %Array* }** %2, align 8 + %6 = load %Callable*, %Callable** %3, align 8 + %7 = call { %Array* }* @Microsoft__Quantum__ErrorCorrection__MeasureStabilizerGenerators__body(%Array* %4, { %Array* }* %5, %Callable* %6) + %8 = bitcast %Tuple* %result-tuple to { { %Array* }* }* + %9 = getelementptr inbounds { { %Array* }* }, { { %Array* }* }* %8, i32 0, i32 0 + store { %Array* }* %7, { %Array* }** %9, align 8 + ret void +} + +define internal void @Microsoft__Quantum__Measurement__MeasureWithScratch__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = call %Result* @Microsoft__Quantum__Measurement__MeasureWithScratch__body(%Array* %3, %Array* %4) + %6 = bitcast %Tuple* %result-tuple to { %Result* }* + %7 = getelementptr inbounds { %Result* }, { %Result* }* %6, i32 0, i32 0 + store %Result* %5, %Result** %7, align 8 + ret void +} + +define internal void @MemoryManagement__4__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Array** + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %13 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 %count-change) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__4__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Array** + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %13 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %14, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %14, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal { { %Callable* }*, { %Callable* }*, { %Callable* }* }* @Microsoft__Quantum__ErrorCorrection__QECC__body({ %Callable* }* %__Item1__, { %Callable* }* %__Item2__, { %Callable* }* %__Item3__) { +entry: + %0 = getelementptr inbounds { %Callable* }, { %Callable* }* %__Item1__, i32 0, i32 0 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { %Callable* }* %__Item1__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = getelementptr inbounds { %Callable* }, { %Callable* }* %__Item2__, i32 0, i32 0 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + %5 = bitcast { %Callable* }* %__Item2__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = getelementptr inbounds { %Callable* }, { %Callable* }* %__Item3__, i32 0, i32 0 + %7 = load %Callable*, %Callable** %6, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %7, i32 1) + %8 = bitcast { %Callable* }* %__Item3__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { 
%Callable* }*, { %Callable* }*, { %Callable* }* }* getelementptr ({ { %Callable* }*, { %Callable* }*, { %Callable* }* }, { { %Callable* }*, { %Callable* }*, { %Callable* }* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { { %Callable* }*, { %Callable* }*, { %Callable* }* }* + %11 = getelementptr inbounds { { %Callable* }*, { %Callable* }*, { %Callable* }* }, { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { { %Callable* }*, { %Callable* }*, { %Callable* }* }, { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %10, i32 0, i32 1 + %13 = getelementptr inbounds { { %Callable* }*, { %Callable* }*, { %Callable* }* }, { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %10, i32 0, i32 2 + store { %Callable* }* %__Item1__, { %Callable* }** %11, align 8 + store { %Callable* }* %__Item2__, { %Callable* }** %12, align 8 + store { %Callable* }* %__Item3__, { %Callable* }** %13, align 8 + %14 = getelementptr inbounds { %Callable* }, { %Callable* }* %__Item1__, i32 0, i32 0 + %15 = load %Callable*, %Callable** %14, align 8 + %16 = getelementptr inbounds { %Callable* }, { %Callable* }* %__Item2__, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = getelementptr inbounds { %Callable* }, { %Callable* }* %__Item3__, i32 0, i32 0 + %19 = load %Callable*, %Callable** %18, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 1) + %20 = bitcast { %Callable* }* %__Item1__ to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 1) + %21 = bitcast { %Callable* }* %__Item2__ to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %19, i32 1) + %22 = bitcast { %Callable* }* %__Item3__ to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %7, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + ret { { %Callable* }*, { %Callable* }*, { %Callable* }* }* %10 +} + +define internal { %Array* }* @Microsoft__Quantum__ErrorCorrection__EncodeIntoBitFlipCode__body(%Array* %physRegister, %Array* %auxQubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %physRegister, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxQubits, i32 1) + call void @Microsoft__Quantum__ErrorCorrection____QsRef1__ApplyBitFlipEncoder____body(i1 false, %Array* %physRegister, %Array* %auxQubits) + %0 = call %Array* @__quantum__rt__array_concatenate(%Array* %physRegister, 
%Array* %auxQubits) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 1) + %logicalRegister = call { %Array* }* @Microsoft__Quantum__ErrorCorrection__LogicalRegister__body(%Array* %0) + %1 = getelementptr inbounds { %Array* }, { %Array* }* %logicalRegister, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %3 = bitcast { %Array* }* %logicalRegister to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %physRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxQubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + ret { %Array* }* %logicalRegister +} + +define internal { %Array*, %Array* }* @Microsoft__Quantum__ErrorCorrection__DecodeFromBitFlipCode__body({ %Array* }* %logicalRegister) { +entry: + %0 = getelementptr inbounds { %Array* }, { %Array* }* %logicalRegister, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %logicalRegister to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 0) + %4 = bitcast i8* %3 to %Qubit** + %5 = load %Qubit*, %Qubit** %4, align 8 + %physRegister = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %physRegister, i64 0) + %7 = bitcast i8* %6 to %Qubit** + store %Qubit* %5, %Qubit** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %physRegister, i32 1) + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 1) + %9 = bitcast i8* %8 to %Qubit** + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 2) + %11 = bitcast i8* %10 to %Qubit** + %12 = load %Qubit*, %Qubit** %9, align 8 + %13 = load %Qubit*, %Qubit** %11, align 8 + %auxQubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %auxQubits, i64 0) + %15 = bitcast i8* %14 to %Qubit** + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %auxQubits, i64 1) + %17 = bitcast i8* %16 to %Qubit** + store %Qubit* %12, %Qubit** %15, align 8 + store %Qubit* %13, %Qubit** %17, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %auxQubits, i32 1) + call void @Microsoft__Quantum__ErrorCorrection____QsRef1__ApplyBitFlipEncoder____adj(i1 false, %Array* %physRegister, %Array* %auxQubits) + call void @__quantum__rt__array_update_reference_count(%Array* %physRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxQubits, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, %Array* }* + %20 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %19, i32 0, i32 1 + store %Array* %physRegister, %Array** %20, align 8 + 
store %Array* %auxQubits, %Array** %21, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %physRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxQubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %physRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxQubits, i32 -1) + ret { %Array*, %Array* }* %19 +} + +define internal { %Array* }* @Microsoft__Quantum__ErrorCorrection__MeasureStabilizerGenerators__body(%Array* %stabilizerGroup, { %Array* }* %logicalRegister, %Callable* %gadget) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %stabilizerGroup) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %stabilizerGroup, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %stabilizerGroup, i32 1) + %8 = getelementptr inbounds { %Array* }, { %Array* }* %logicalRegister, i32 0, i32 0 + %9 = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 1) + %10 = bitcast { %Array* }* %logicalRegister to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %gadget, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %gadget, i32 1) + %results = call %Array* @Microsoft__Quantum__Measurement__MeasurePaulis__body(%Array* %stabilizerGroup, %Array* %9, %Callable* %gadget) + call void @__quantum__rt__array_update_alias_count(%Array* %results, i32 1) + %11 = call { %Array* }* @Microsoft__Quantum__ErrorCorrection__Syndrome__body(%Array* %results) + %12 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %stabilizerGroup, i64 %13) + %16 = bitcast i8* %15 to %Array** + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %18 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %stabilizerGroup, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %gadget, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %gadget, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %results, i32 -1) + %19 = call i64 @__quantum__rt__array_get_size_1d(%Array* %results) + %20 = sub i64 %19, 1 + br 
label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %21 = phi i64 [ 0, %exit__2 ], [ %26, %exiting__3 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %results, i64 %21) + %24 = bitcast i8* %23 to %Result** + %25 = load %Result*, %Result** %24, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %25, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %26 = add i64 %21, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %results, i32 -1) + ret { %Array* }* %11 +} + +define internal { %Callable* }* @Microsoft__Quantum__ErrorCorrection__TableLookupRecovery__body(%Array* %table) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %table) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %table, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %table, i32 1) + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__ErrorCorrection__TableLookupRecoveryImpl__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %9 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %15, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %table, i64 %10) + %13 = bitcast i8* %12 to %Array** + %14 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %15 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %table, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Callable*, %Array* }* + %18 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 1 + store %Callable* %8, %Callable** %18, align 8 + store %Array* %table, %Array** %19, align 8 + %20 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__13__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__5__FunctionTable, %Tuple* %16) + %21 = call { %Callable* }* @Microsoft__Quantum__ErrorCorrection__RecoveryFn__body(%Callable* %20) + %22 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %23 = phi i64 [ 0, %exit__2 ], [ %28, %exiting__3 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__3, 
label %exit__3 + +body__3: ; preds = %header__3 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %table, i64 %23) + %26 = bitcast i8* %25 to %Array** + %27 = load %Array*, %Array** %26, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %28 = add i64 %23, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %table, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %20, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %20, i32 -1) + ret { %Callable* }* %21 +} + +define internal { %Array* }* @Microsoft__Quantum__ErrorCorrection__LogicalRegister__body(%Array* %__Item1__) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Array* }* + %2 = getelementptr inbounds { %Array* }, { %Array* }* %1, i32 0, i32 0 + store %Array* %__Item1__, %Array** %2, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %__Item1__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__Item1__, i32 -1) + ret { %Array* }* %1 +} + +define internal { %Array* }* @Microsoft__Quantum__ErrorCorrection__Syndrome__body(%Array* %__Item1__) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Array* }* + %2 = getelementptr inbounds { %Array* }, { %Array* }* %1, i32 0, i32 0 + store %Array* %__Item1__, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__Item1__) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__Item1__, i64 %5) + %8 = bitcast i8* %7 to %Result** + %9 = load %Result*, %Result** %8, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %9, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__Item1__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__Item1__, i32 -1) + ret { %Array* }* %1 +} + +define internal { %Callable* }* @Microsoft__Quantum__ErrorCorrection__RecoveryFn__body(%Callable* %__Item1__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable* }* + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + store %Callable* %__Item1__, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* 
%__Item1__, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 -1) + ret { %Callable* }* %1 +} + +define internal void @Lifted__PartialApplication__13__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, { %Array* }* }* + %5 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__ErrorCorrection__TableLookupRecoveryImpl__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = call %Array* @Microsoft__Quantum__ErrorCorrection__TableLookupRecoveryImpl__body(%Array* %3, { %Array* }* %4) + %6 = bitcast %Tuple* %result-tuple to { %Array* }* + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + store %Array* %5, %Array** %7, align 8 + ret void +} + +define internal void @MemoryManagement__5__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Array** + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; 
preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__5__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Array** + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Array* @Microsoft__Quantum__ErrorCorrection__TableLookupRecoveryImpl__body(%Array* %table, { %Array* }* %syndrome) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %table) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %table, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %table, i32 1) + %8 = getelementptr inbounds { %Array* }, { %Array* }* %syndrome, i32 0, i32 0 + %9 = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 1) + %10 = bitcast { %Array* }* %syndrome to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + %11 = call i64 @Microsoft__Quantum__Convert__ResultArrayAsInt__body(%Array* %9) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %table, i64 %11) + %13 = bitcast i8* %12 to %Array** + %14 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 1) + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + 
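+  ; The recovery operation was selected above by converting the syndrome's Result[] into an integer index (ResultArrayAsInt) and loading that row of the table; the loop below merely releases the per-row alias counts taken on entry.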
+body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %table, i64 %16) + %19 = bitcast i8* %18 to %Array** + %20 = load %Array*, %Array** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %table, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + ret %Array* %14 +} + +attributes #0 = { nofree nosync nounwind readnone speculatable willreturn } diff --git a/src/munchkin/tests/qsharp/hydrogen-sim/HydrogenSimulation.qs b/src/munchkin/tests/qsharp/hydrogen-sim/HydrogenSimulation.qs new file mode 100644 index 0000000..519d308 --- /dev/null +++ b/src/munchkin/tests/qsharp/hydrogen-sim/HydrogenSimulation.qs @@ -0,0 +1,107 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Chemistry.Samples.Hydrogen { + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Chemistry.JordanWigner; + open Microsoft.Quantum.Simulation; + open Microsoft.Quantum.Characterization; + open Microsoft.Quantum.Convert; + open Microsoft.Quantum.Math; + + // We now use the Q# component of the chemistry library to obtain + // quantum operations that implement real-time evolution by + // the chemistry Hamiltonian. Below, we consider two examples. + // - Trotter simulation algorithm + // - Qubitization simulation algorithm + + // These operations are invoked as oracles in the quantum phase estimation + // algorithm to extract energy estimates of various eigenstates of the + // Hamiltonian. + + // The returned energy estimate is chosen probabilistically, depending on + // the overlap of the initial trial state. By default, we greedily + // fill spin-orbitals to minimize the diagonal component of the one-electron + // energies. + + ////////////////////////////////////////////////////////////////////////// + // Using Trotterization ////////////////////////////////////////////////// + ////////////////////////////////////////////////////////////////////////// + + // We can now use Canon's phase estimation algorithms to + // learn the ground state energy using the above simulation. + operation GetEnergyByTrotterization (qSharpData : JordanWignerEncodingData, nBitsPrecision : Int, trotterStepSize : Double, trotterOrder : Int) : (Double, Double) { + + // The data describing the Hamiltonian for all these steps is contained in + // `qSharpData`. + let (nSpinOrbitals, fermionTermData, statePrepData, energyOffset) = qSharpData!; + + // We use a Product formula, also known as `Trotterization`, to + // simulate the Hamiltonian. + let (nQubits, (rescaleFactor, oracle)) = TrotterStepOracle(qSharpData, trotterStepSize, trotterOrder); + + // The operation that creates the trial state is defined below. + // By default, greedy filling of spin-orbitals is used. + let statePrep = PrepareTrialState(statePrepData, _); + + // We use the Robust Phase Estimation algorithm + // of Kimmel, Low and Yoder. + let phaseEstAlgorithm = RobustPhaseEstimation(nBitsPrecision, _, _); + + // This runs the quantum algorithm and returns a phase estimate.
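+        // The oracle implements (approximately) real-time evolution e^{iHt} for a single step of duration t = trotterStepSize, so an eigenstate with energy E acquires the phase E * t; the rescaling below recovers E from the estimated phase.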
+        let estPhase = EstimateEnergy(nQubits, statePrep, oracle, phaseEstAlgorithm);
+
+        // We obtain the energy estimate by rescaling the phase estimate
+        // with the rescaleFactor, which accounts for the trotterStepSize.
+        // We also add the constant energy offset to the estimated energy.
+        let estEnergy = estPhase * rescaleFactor + energyOffset;
+
+        // We return both the estimated phase and the estimated energy.
+        return (estPhase, estEnergy);
+    }
+
+
+    //////////////////////////////////////////////////////////////////////////
+    // Using Qubitization ////////////////////////////////////////////////////
+    //////////////////////////////////////////////////////////////////////////
+
+    // The following is identical to the approach above using Trotterization,
+    // except that we replace the oracle with a quantum walk created by the
+    // qubitization procedure. This results in a more accurate simulation,
+    // but at the cost of a larger qubit overhead.
+    operation GetEnergyByQubitization (qSharpData : JordanWignerEncodingData, nBitsPrecision : Int) : (Double, Double) {
+
+        // The data describing the Hamiltonian for all these steps is contained in
+        // `qSharpData`.
+        let (nSpinOrbitals, fermionTermData, statePrepData, energyOffset) = qSharpData!;
+
+        // The parameters required by qubitization are returned by this
+        // convenience function.
+        let (nQubits, (oneNorm, oracle)) = QubitizationOracle(qSharpData);
+
+        // The operation that creates the trial state is defined below.
+        // By default, greedy filling of spin-orbitals is used.
+        let statePrep = PrepareTrialState(statePrepData, _);
+
+        // We use the Robust Phase Estimation algorithm
+        // of Kimmel, Low and Yoder.
+        let phaseEstAlgorithm = RobustPhaseEstimation(nBitsPrecision, _, _);
+
+        // This runs the quantum algorithm and returns a phase estimate.
+        let estPhase = EstimateEnergy(nQubits, statePrep, oracle, phaseEstAlgorithm);
+
+        // Note that the quantum walk applies e^{i sin^{-1}(H / oneNorm)}, in
+        // contrast to the real-time evolution e^{iHt} of a product formula.
+
+        // Thus we obtain the energy estimate by applying Sin(.) to the phase
+        // estimate and then rescaling by the coefficient one-norm of the
+        // Hamiltonian. We also add the constant energy offset to the
+        // estimated energy.
+        let estEnergy = Sin(estPhase) * oneNorm + energyOffset;
+
+        // We return both the estimated phase and the estimated energy.
+        return (estPhase, estEnergy);
+    }
+
+}
+
diff --git a/src/munchkin/tests/qsharp/hydrogen-sim/Program.cs b/src/munchkin/tests/qsharp/hydrogen-sim/Program.cs
new file mode 100644
index 0000000..8185cbc
--- /dev/null
+++ b/src/munchkin/tests/qsharp/hydrogen-sim/Program.cs
@@ -0,0 +1,185 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// Licensed under the MIT License.
+
+#region Using Statements
+// We will need several different libraries in this sample.
+// Here, we expose these libraries to our program using the
+// C# "using" statement, similar to the Q# "open" statement.
+
+// We will use the data model implemented by the Quantum Development Kit chemistry
+// libraries. This model defines what a fermionic Hamiltonian is, and how to
+// represent Hamiltonians on disk.
+using Microsoft.Quantum.Chemistry.OrbitalIntegrals;
+using Microsoft.Quantum.Chemistry.Fermion;
+using Microsoft.Quantum.Chemistry.QSharpFormat;
+
+// To run our Q# code, we'll use the full-state simulator provided with
+// the Quantum Development Kit.
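+// (A rough note on cost: QuantumSimulator, used below, performs full
+// state-vector simulation, storing 2^n complex amplitudes for n qubits.
+// This Hamiltonian acts on only four spin-orbitals, so even with the
+// ancillas used by phase estimation the register stays far below the
+// roughly 30-qubit practical ceiling of full-state simulation.)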
+using Microsoft.Quantum.Simulation.Simulators;
+
+// The System namespace provides a number of useful built-in
+// types and methods that we'll use throughout this sample.
+using System;
+
+// The System.Diagnostics namespace provides us with the
+// Stopwatch class, which is quite useful for measuring
+// how long each run takes.
+using System.Diagnostics;
+
+// The System.Collections.Generic library provides many different
+// utilities for working with collections such as lists and dictionaries.
+using System.Collections.Generic;
+
+// We use the logging library provided with .NET Core to handle output
+// in a robust way that makes it easy to turn different messages on and off.
+using Microsoft.Extensions.Logging;
+
+// We use this for convenience methods for manipulating arrays.
+using System.Linq;
+#endregion
+
+namespace Microsoft.Quantum.Chemistry.Samples.Hydrogen
+{
+    class Program
+    {
+        static void Main(string[] args)
+        {
+            //////////////////////////////////////////////////////////////////////////
+            // Introduction //////////////////////////////////////////////////////////
+            //////////////////////////////////////////////////////////////////////////
+
+            // In this example, we will create a spin-orbital representation of the molecular
+            // Hydrogen Hamiltonian `H`, given overlap coefficients for its one- and
+            // two-electron integrals.
+
+            // We then perform quantum phase estimation to obtain an estimate of
+            // the molecular Hydrogen ground state energy.
+
+            #region Building the Hydrogen Hamiltonian through orbital integrals
+
+            // This representation has two occupied spin-orbitals.
+            var nElectrons = 2;
+
+            // The Coulomb repulsion energy between the nuclei is:
+            var energyOffset = 0.713776188;
+
+            // One-electron integrals are listed below.
+            // <0|H|0> = -1.252477495
+            // <1|H|1> = -0.475934275
+
+            // Two-electron integrals are listed below.
+            // <00|H|00> = 0.674493166
+            // <01|H|01> = 0.181287518
+            // <01|H|10> = 0.663472101
+            // <11|H|11> = 0.697398010
+            // Note that orbitals are assumed to be real. Moreover, indistinguishability
+            // of electrons means that the following integrals are equal:
+            //   <pq|H|rs> = <qp|H|sr> = <rs|H|pq> = <sr|H|qp>
+            //             = <rq|H|ps> = <ps|H|rq> = <sp|H|qr> = <qr|H|sp>
+            // Thus it suffices to specify just one term from each symmetry
+            // group.
+
+            // These orbital integrals are represented using the OrbitalIntegral
+            // data structure.
+            var orbitalIntegrals = new OrbitalIntegral[]
+            {
+                new OrbitalIntegral(new[] { 0,0 }, -1.252477495),
+                new OrbitalIntegral(new[] { 1,1 }, -0.475934275),
+                new OrbitalIntegral(new[] { 0,0,0,0 }, 0.674493166),
+                new OrbitalIntegral(new[] { 0,1,0,1 }, 0.181287518),
+                new OrbitalIntegral(new[] { 0,1,1,0 }, 0.663472101),
+                new OrbitalIntegral(new[] { 1,1,1,1 }, 0.697398010),
+                // Add the identity term.
+                new OrbitalIntegral(new int[] { }, energyOffset)
+            };
+
+            // We initialize a fermion Hamiltonian data structure and add terms to it.
+            var fermionHamiltonian = new OrbitalIntegralHamiltonian(orbitalIntegrals).ToFermionHamiltonian();
+
+            // These orbital integral terms are automatically expanded into
+            // spin-orbitals. We may print the Hamiltonian to verify what it contains.
+            Console.WriteLine("----- Print Hamiltonian");
+            Console.Write(fermionHamiltonian);
+            Console.WriteLine("----- End Print Hamiltonian \n");
+
+            // We also need to create an input quantum state for this Hamiltonian.
+            // Let us use the Hartree–Fock state.
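+            // Loosely, the Hartree–Fock state occupies the nElectrons
+            // lowest-energy spin-orbitals. Molecular Hydrogen in this minimal
+            // basis has four spin-orbitals (two spatial orbitals, each with
+            // two spin states), so both electrons fill the two lowest ones:
+            // an occupation-number state of the form |1100>.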
+            var fermionWavefunction = fermionHamiltonian.CreateHartreeFockState(nElectrons);
+            #endregion
+
+            #region Jordan–Wigner representation
+            // The Jordan–Wigner encoding converts the fermion Hamiltonian,
+            // expressed in terms of fermionic operators, to a qubit Hamiltonian,
+            // expressed in terms of Pauli matrices. This is an essential step
+            // for simulating our constructed Hamiltonians on a qubit quantum
+            // computer.
+            Console.WriteLine("----- Creating Jordan–Wigner encoding");
+            var jordanWignerEncoding = fermionHamiltonian.ToPauliHamiltonian(Paulis.QubitEncoding.JordanWigner);
+            Console.WriteLine("----- End Creating Jordan–Wigner encoding \n");
+
+            // Print the Jordan–Wigner encoded Hamiltonian to verify what it contains.
+            Console.WriteLine("----- Print Hamiltonian");
+            Console.Write(jordanWignerEncoding);
+            Console.WriteLine("----- End Print Hamiltonian \n");
+            #endregion
+
+            #region Performing the simulation
+            // We are now ready to run a quantum simulation of molecular Hydrogen.
+            // We will use this to obtain an estimate of its ground state energy.
+
+            // Here, we make an instance of the simulator used to run our Q# code.
+            using (var qsim = new QuantumSimulator())
+            {
+
+                // This Jordan–Wigner data structure also contains a representation
+                // of the Hamiltonian and wavefunction made for consumption by the Q# algorithms.
+                var qSharpHamiltonianData = jordanWignerEncoding.ToQSharpFormat();
+                var qSharpWavefunctionData = fermionWavefunction.ToQSharpFormat();
+                var qSharpData = QSharpFormat.Convert.ToQSharpFormat(qSharpHamiltonianData, qSharpWavefunctionData);
+
+                // We specify the bits of precision desired in the phase estimation
+                // algorithm.
+                var bits = 7;
+
+                // We specify the step size of the simulated time evolution.
+                var trotterStep = 0.4;
+
+                // Choose the order of the Trotter integrator.
+                Int64 trotterOrder = 1;
+
+                // As the quantum algorithm is probabilistic, let us run a few trials.
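+                // Loosely speaking, a 7-bit phase estimate resolves the
+                // eigenphase to about one part in 2^7 = 128, so individual
+                // trials scatter around the true value. Increasing `bits`
+                // tightens that spread at the cost of deeper circuits, while
+                // shrinking `trotterStep` reduces the systematic bias of the
+                // product-formula approximation.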
+
+                // This may be compared to the true value of:
+                Console.WriteLine("Exact molecular Hydrogen ground state energy: -1.137260278.\n");
+                Console.WriteLine("----- Performing quantum energy estimation by Trotter simulation algorithm");
+                for (int i = 0; i < 5; i++)
+                {
+                    // Estimate the energy by phase estimation with the
+                    // Trotterized evolution oracle defined in HydrogenSimulation.qs.
+                    var (phaseEst, energyEst) = GetEnergyByTrotterization.Run(qsim, qSharpData, bits, trotterStep, trotterOrder).Result;
+
+                    Console.WriteLine($"Rep #{i+1}/5: Energy estimate: {energyEst}; Phase estimate: {phaseEst}");
+                }
+                Console.WriteLine("----- End Performing quantum energy estimation by Trotter simulation algorithm\n");
+
+                Console.WriteLine("----- Performing quantum energy estimation by Qubitization simulation algorithm");
+                for (int i = 0; i < 1; i++)
+                {
+                    // Estimate the energy by phase estimation with the
+                    // qubitization quantum-walk oracle defined in HydrogenSimulation.qs.
+                    var (phaseEst, energyEst) = GetEnergyByQubitization.Run(qsim, qSharpData, bits).Result;
+
+                    Console.WriteLine($"Rep #{i+1}/1: Energy estimate: {energyEst}; Phase estimate: {phaseEst}");
+                }
+                Console.WriteLine("----- End Performing quantum energy estimation by Qubitization simulation algorithm\n");
+            }
+
+            Console.WriteLine("Press Enter to continue...");
+            if (System.Diagnostics.Debugger.IsAttached)
+            {
+                Console.ReadLine();
+            }
+            #endregion
+        }
+    }
+}
diff --git a/src/munchkin/tests/qsharp/hydrogen-sim/hydrogen-sim.csproj b/src/munchkin/tests/qsharp/hydrogen-sim/hydrogen-sim.csproj
new file mode 100644
index 0000000..f930576
--- /dev/null
+++ b/src/munchkin/tests/qsharp/hydrogen-sim/hydrogen-sim.csproj
@@ -0,0 +1,22 @@
+
+
+
+    Library
+    net6.0
+    true
+    Detailed
+    $(NETCoreSdkRuntimeIdentifier)
+
+
+
+
+
+
+
+
+
+    Always
+
+
+
diff --git a/src/munchkin/tests/qsharp/hydrogen-sim/libLLVM.dll b/src/munchkin/tests/qsharp/hydrogen-sim/libLLVM.dll
new file mode 100644
index 0000000..e10836a
Binary files /dev/null and b/src/munchkin/tests/qsharp/hydrogen-sim/libLLVM.dll differ
diff --git a/src/munchkin/tests/qsharp/hydrogen-sim/qir/VQE.ll b/src/munchkin/tests/qsharp/hydrogen-sim/qir/VQE.ll
new file mode 100644
index 0000000..c2881c6
--- /dev/null
+++ b/src/munchkin/tests/qsharp/hydrogen-sim/qir/VQE.ll
@@ -0,0 +1,31688 @@
+
+%Tuple = type opaque
+%Array = type opaque
+%Callable = type opaque
+%Result = type opaque
+%Qubit = type opaque
+%Range = type { i64, i64, i64 }
+%String = type opaque
+
+@Microsoft__Quantum__Intrinsic__Reset__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Reset__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null]
+@0 = internal constant [75 x i8] c"operation ApplyDiagonalUnitary -- Number of qubits must be greater than 0.\00"
+@PartialApplication__1__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctladj__wrapper]
+@Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body__wrapper, void (%Tuple*, %Tuple*, 
%Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj__wrapper] +@MemoryManagement__1__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__1__RefCount, void (%Tuple*, i32)* @MemoryManagement__1__AliasCount] +@PartialApplication__2__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctladj__wrapper] +@Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj__wrapper] +@MemoryManagement__2__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__2__RefCount, void (%Tuple*, i32)* @MemoryManagement__2__AliasCount] +@Microsoft__Quantum__Intrinsic__H__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__ctladj__wrapper] +@PartialApplication__3__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__S__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__ctladj__wrapper] +@1 = internal constant [38 x i8] c"MultiplexPauli failed. 
Invalid pauli \00" +@2 = internal constant [7 x i8] c"PauliX\00" +@3 = internal constant [7 x i8] c"PauliY\00" +@4 = internal constant [7 x i8] c"PauliZ\00" +@5 = internal constant [7 x i8] c"PauliI\00" +@6 = internal constant [2 x i8] c".\00" +@PartialApplication__4__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctladj__wrapper] +@PartialApplication__5__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctladj__wrapper] +@PartialApplication__6__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctladj__wrapper] +@PartialApplication__7__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__ctladj__wrapper] +@PartialApplication__8__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__ctladj__wrapper] +@PartialApplication__9__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctladj__wrapper] +@PartialApplication__10__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctladj__wrapper] +@PartialApplication__11__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__11__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctladj__wrapper] +@PartialApplication__12__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctladj__wrapper] +@PartialApplication__13__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__13__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__13__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__13__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__13__ctladj__wrapper] +@Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__ctladj__wrapper] +@MemoryManagement__3__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__3__RefCount, void (%Tuple*, i32)* @MemoryManagement__3__AliasCount] +@PartialApplication__14__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__14__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__14__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__14__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__14__ctladj__wrapper] +@Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__ctladj__wrapper] +@MemoryManagement__4__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__4__RefCount, void (%Tuple*, i32)* @MemoryManagement__4__AliasCount] +@PartialApplication__15__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__15__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__15__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__15__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__15__ctladj__wrapper] +@Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__ctladj__wrapper] +@MemoryManagement__5__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__5__RefCount, void (%Tuple*, i32)* @MemoryManagement__5__AliasCount] +@7 = internal constant [47 x i8] c"Control register shorter than control pattern.\00" +@PartialApplication__16__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__16__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__16__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__16__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__16__ctladj__wrapper] +@Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__ctladj__wrapper] +@MemoryManagement__6__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__6__RefCount, void (%Tuple*, i32)* @MemoryManagement__6__AliasCount] +@PartialApplication__17__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__ctladj__wrapper] +@Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__ctl__wrapper, void (%Tuple*, 
%Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__ctladj__wrapper] +@MemoryManagement__7__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__7__RefCount, void (%Tuple*, i32)* @MemoryManagement__7__AliasCount] +@PartialApplication__18__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__18__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Canon___516334c53dfb4d4b89cd46336a852347___QsRef0__ComposedOutput____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___516334c53dfb4d4b89cd46336a852347___QsRef0__ComposedOutput____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__8__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__8__RefCount, void (%Tuple*, i32)* @MemoryManagement__8__AliasCount] +@PartialApplication__19__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__ctladj__wrapper] +@Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctladj__wrapper] +@MemoryManagement__9__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__9__RefCount, void (%Tuple*, i32)* @MemoryManagement__9__AliasCount] +@PartialApplication__20__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__ctladj__wrapper] +@Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* 
@Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctladj__wrapper] +@PartialApplication__21__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__ctladj__wrapper] +@Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctladj__wrapper] +@MemoryManagement__10__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__10__RefCount, void (%Tuple*, i32)* @MemoryManagement__10__AliasCount] +@8 = internal constant [11 x i8] c"Odd order \00" +@9 = internal constant [20 x i8] c" not yet supported.\00" +@PartialApplication__22__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__ctladj__wrapper] +@Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____ctladj__wrapper] +@MemoryManagement__11__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__11__RefCount, void (%Tuple*, i32)* @MemoryManagement__11__AliasCount] +@PartialApplication__23__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__23__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Simulation____QsRef0___AddGeneratorSystems____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, 
%Tuple*)* @Microsoft__Quantum__Simulation____QsRef0___AddGeneratorSystems____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__12__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__12__RefCount, void (%Tuple*, i32)* @MemoryManagement__12__AliasCount] +@Microsoft__Quantum__Simulation__IdentityGeneratorIndex__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Simulation__AddGeneratorSystems__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation__AddGeneratorSystems__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__24__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__ctladj__wrapper] +@Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____ctladj__wrapper] +@MemoryManagement__13__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__13__RefCount, void (%Tuple*, i32)* @MemoryManagement__13__AliasCount] +@PartialApplication__25__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__ctladj__wrapper] +@Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____ctladj__wrapper] +@MemoryManagement__14__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__14__RefCount, 
void (%Tuple*, i32)* @MemoryManagement__14__AliasCount] +@PartialApplication__26__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__ctladj__wrapper] +@MemoryManagement__15__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__15__RefCount, void (%Tuple*, i32)* @MemoryManagement__15__AliasCount] +@10 = internal constant [71 x i8] c"Specified output array length must be longer than `inputArray` length.\00" +@11 = internal constant [39 x i8] c"Array must be of the length at least 1\00" +@12 = internal constant [22 x i8] c"Index is out of bound\00" +@PartialApplication__27__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__27__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Arrays___4cd8b89fe06d48f482ef5ccfcb618894_ElementAt__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Arrays___4cd8b89fe06d48f482ef5ccfcb618894_ElementAt__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__16__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__16__RefCount, void (%Tuple*, i32)* @MemoryManagement__16__AliasCount] +@13 = internal constant [36 x i8] c"Qubit in invalid state. 
Expecting: \00" +@14 = internal constant [2 x i8] c"\22\00" +@15 = internal constant [13 x i8] c"\0A\09Expected:\09\00" +@16 = internal constant [5 x i8] c"true\00" +@17 = internal constant [6 x i8] c"false\00" +@18 = internal constant [11 x i8] c"\0A\09Actual:\09\00" +@19 = internal constant [33 x i8] c"`bits` must be between 0 and 63 \00" +@20 = internal constant [34 x i8] c"`number` must be between 0 and 2^\00" +@21 = internal constant [15 x i8] c" - 1, but was \00" +@PartialApplication__28__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__ctladj__wrapper] +@Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____ctladj__wrapper] +@MemoryManagement__17__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__17__RefCount, void (%Tuple*, i32)* @MemoryManagement__17__AliasCount] +@PartialApplication__29__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__ctladj__wrapper] +@PartialApplication__30__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__ctladj__wrapper] +@Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____ctladj__wrapper] +@MemoryManagement__18__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__18__RefCount, void (%Tuple*, i32)* @MemoryManagement__18__AliasCount] +@PartialApplication__31__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__31__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__31__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__31__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__31__ctladj__wrapper] +@Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____ctladj__wrapper] +@MemoryManagement__19__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__19__RefCount, void (%Tuple*, i32)* @MemoryManagement__19__AliasCount] +@PartialApplication__32__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__32__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Math__ComplexPolar__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Math__ComplexPolar__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__20__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__20__RefCount, void (%Tuple*, i32)* @MemoryManagement__20__AliasCount] +@Microsoft__Quantum__Math__AbsD__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Math__AbsD__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__33__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__33__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__34__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__34__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__35__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__35__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@22 = internal constant [46 x i8] c"Unitary coupled-cluster PQRS failed: indices \00" +@23 = internal constant [3 x i8] c", \00" +@24 = internal constant [18 x i8] c" must be distinct\00" +@25 = internal constant [44 x i8] c"Unitary coupled-cluster PQ failed: indices \00" +@PartialApplication__36__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__36__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__36__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__36__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__36__ctladj__wrapper] +@Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____ctladj__wrapper] +@MemoryManagement__21__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__21__RefCount, void (%Tuple*, i32)* @MemoryManagement__21__AliasCount] +@PartialApplication__37__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__37__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__37__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__37__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__37__ctladj__wrapper] +@Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctladj__wrapper] +@MemoryManagement__22__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__22__RefCount, void (%Tuple*, i32)* @MemoryManagement__22__AliasCount] +@26 = internal constant [86 x i8] c"ComputeJordanWignerString failed. `idxFermions` must contain an even number of terms.\00" +@27 = internal constant [46 x i8] c"ComputeJordanWignerString failed. 
fermionIdx \00" +@28 = internal constant [15 x i8] c" out of range.\00" +@29 = internal constant [47 x i8] c"Completely invalid cluster operator specified.\00" +@Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorFunction____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorFunction____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__38__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__38__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Chemistry__JordanWigner____QsRef3__JordanWignerStateAsGeneratorIndex____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3__JordanWignerStateAsGeneratorIndex____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__23__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__23__RefCount, void (%Tuple*, i32)* @MemoryManagement__23__AliasCount] +@Microsoft__Quantum__Intrinsic__X__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper] +@Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___PrepareSingleConfigurationalStateSingleSiteOccupation____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___PrepareSingleConfigurationalStateSingleSiteOccupation____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__ctladj__wrapper] +@PartialApplication__39__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__39__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* 
@Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__24__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__24__RefCount, void (%Tuple*, i32)* @MemoryManagement__24__AliasCount] +@PartialApplication__40__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__40__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__40__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__40__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__40__ctladj__wrapper] +@MemoryManagement__25__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__25__RefCount, void (%Tuple*, i32)* @MemoryManagement__25__AliasCount] +@PartialApplication__41__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__41__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__41__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__26__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__26__RefCount, void (%Tuple*, i32)* @MemoryManagement__26__AliasCount] +@PartialApplication__42__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__42__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Intrinsic__Measure__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Measure__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__27__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__27__RefCount, void (%Tuple*, i32)* @MemoryManagement__27__AliasCount] +@PartialApplication__43__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__43__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Chemistry__HTermsToGenIdx__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__HTermsToGenIdx__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] 
+@MemoryManagement__28__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__28__RefCount, void (%Tuple*, i32)* @MemoryManagement__28__AliasCount] + +define double @Microsoft__Quantum__Chemistry__VQE__GetEnergyVQE__body({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedData, double %theta1, double %theta2, double %theta3, i64 %nSamples) { +entry: + %0 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedData, i32 0, i32 1 + %fermionTermData = load { %Array*, %Array*, %Array*, %Array* }*, { %Array*, %Array*, %Array*, %Array* }** %0, align 8 + %1 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %2) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %5) + %8 = bitcast i8* %7 to { %Array*, %Array* }** + %9 = load { %Array*, %Array* }*, { %Array*, %Array* }** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array*, %Array* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call i64 @__quantum__rt__array_get_size_1d(%Array* %17) + %19 = sub i64 %18, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %20) + %23 = bitcast i8* %22 to { %Array*, %Array* }** + %24 = load { %Array*, %Array* }*, { %Array*, %Array* }** %23, align 8 + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { %Array*, %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %20, 1 + br label %header__2 + 
+exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %31 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 2 + %32 = load %Array*, %Array** %31, align 8 + %33 = call i64 @__quantum__rt__array_get_size_1d(%Array* %32) + %34 = sub i64 %33, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %45, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %35) + %38 = bitcast i8* %37 to { %Array*, %Array* }** + %39 = load { %Array*, %Array* }*, { %Array*, %Array* }** %38, align 8 + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 0 + %41 = load %Array*, %Array** %40, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1) + %42 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 1 + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + %44 = bitcast { %Array*, %Array* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %44, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %45 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %46 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 3 + %47 = load %Array*, %Array** %46, align 8 + %48 = call i64 @__quantum__rt__array_get_size_1d(%Array* %47) + %49 = sub i64 %48, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %50 = phi i64 [ 0, %exit__3 ], [ %60, %exiting__4 ] + %51 = icmp sle i64 %50, %49 + br i1 %51, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %50) + %53 = bitcast i8* %52 to { %Array*, %Array* }** + %54 = load { %Array*, %Array* }*, { %Array*, %Array* }** %53, align 8 + %55 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 0 + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + %57 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 1 + %58 = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 1) + %59 = bitcast { %Array*, %Array* }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %59, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %60 = add i64 %50, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + %61 = bitcast { %Array*, %Array*, %Array*, %Array* }* %fermionTermData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %62 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedData, i32 0, i32 2 + %inputState = load { i64, %Array* }*, { i64, %Array* }** %62, align 8 + %63 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputState, i32 0, i32 
1 + %JWInputStates = load %Array*, %Array** %63, align 8 + %64 = call i64 @__quantum__rt__array_get_size_1d(%Array* %JWInputStates) + %65 = sub i64 %64, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %66 = phi i64 [ 0, %exit__4 ], [ %77, %exiting__5 ] + %67 = icmp sle i64 %66, %65 + br i1 %67, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 %66) + %69 = bitcast i8* %68 to { { double, double }*, %Array* }** + %70 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %69, align 8 + %71 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %70, i32 0, i32 0 + %72 = load { double, double }*, { double, double }** %71, align 8 + %73 = bitcast { double, double }* %72 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %73, i32 1) + %74 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %70, i32 0, i32 1 + %75 = load %Array*, %Array** %74, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %75, i32 1) + %76 = bitcast { { double, double }*, %Array* }* %70 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %76, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %77 = add i64 %66, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %JWInputStates, i32 1) + %78 = bitcast { i64, %Array* }* %inputState to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 1) + %79 = bitcast { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + %80 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedData, i32 0, i32 0 + %nSpinOrbitals = load i64, i64* %80, align 4 + %81 = sub i64 %3, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %82 = phi i64 [ 0, %exit__5 ], [ %92, %exiting__6 ] + %83 = icmp sle i64 %82, %81 + br i1 %83, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %82) + %85 = bitcast i8* %84 to { %Array*, %Array* }** + %86 = load { %Array*, %Array* }*, { %Array*, %Array* }** %85, align 8 + %87 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %86, i32 0, i32 0 + %88 = load %Array*, %Array** %87, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %88, i32 1) + %89 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %86, i32 0, i32 1 + %90 = load %Array*, %Array** %89, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %90, i32 1) + %91 = bitcast { %Array*, %Array* }* %86 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %91, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %92 = add i64 %82, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %93 = sub i64 %18, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %94 = phi i64 [ 0, %exit__6 ], [ %104, %exiting__7 ] + %95 = icmp sle i64 %94, %93 + br i1 %95, label %body__7, label %exit__7 + +body__7: ; 
preds = %header__7 + %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %94) + %97 = bitcast i8* %96 to { %Array*, %Array* }** + %98 = load { %Array*, %Array* }*, { %Array*, %Array* }** %97, align 8 + %99 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %98, i32 0, i32 0 + %100 = load %Array*, %Array** %99, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %100, i32 1) + %101 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %98, i32 0, i32 1 + %102 = load %Array*, %Array** %101, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %102, i32 1) + %103 = bitcast { %Array*, %Array* }* %98 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %103, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %104 = add i64 %94, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %105 = sub i64 %33, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %106 = phi i64 [ 0, %exit__7 ], [ %116, %exiting__8 ] + %107 = icmp sle i64 %106, %105 + br i1 %107, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %106) + %109 = bitcast i8* %108 to { %Array*, %Array* }** + %110 = load { %Array*, %Array* }*, { %Array*, %Array* }** %109, align 8 + %111 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %110, i32 0, i32 0 + %112 = load %Array*, %Array** %111, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %112, i32 1) + %113 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %110, i32 0, i32 1 + %114 = load %Array*, %Array** %113, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %114, i32 1) + %115 = bitcast { %Array*, %Array* }* %110 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %115, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %116 = add i64 %106, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %117 = sub i64 %48, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %118 = phi i64 [ 0, %exit__8 ], [ %128, %exiting__9 ] + %119 = icmp sle i64 %118, %117 + br i1 %119, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %120 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %118) + %121 = bitcast i8* %120 to { %Array*, %Array* }** + %122 = load { %Array*, %Array* }*, { %Array*, %Array* }** %121, align 8 + %123 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %122, i32 0, i32 0 + %124 = load %Array*, %Array** %123, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %124, i32 1) + %125 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %122, i32 0, i32 1 + %126 = load %Array*, %Array** %125, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %126, i32 1) + %127 = bitcast { %Array*, %Array* }* %122 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %127, i32 1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %128 = add i64 %118, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %129 = sub i64 %64, 1 + 
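+; NOTE (annotation): header__6..header__11 are further registration passes
+; over the same data; the generated code appears to emit one alias-count
+; pass per source-level binding of the tuple members.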
br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %130 = phi i64 [ 0, %exit__9 ], [ %141, %exiting__10 ] + %131 = icmp sle i64 %130, %129 + br i1 %131, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %132 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 %130) + %133 = bitcast i8* %132 to { { double, double }*, %Array* }** + %134 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %133, align 8 + %135 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %134, i32 0, i32 0 + %136 = load { double, double }*, { double, double }** %135, align 8 + %137 = bitcast { double, double }* %136 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %137, i32 1) + %138 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %134, i32 0, i32 1 + %139 = load %Array*, %Array** %138, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %139, i32 1) + %140 = bitcast { { double, double }*, %Array* }* %134 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %140, i32 1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %141 = add i64 %130, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %JWInputStates, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 1) + %142 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedData, i32 0, i32 3 + %energyOffset = load double, double* %142, align 8 + %143 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputState, i32 0, i32 0 + %stateType = load i64, i64* %143, align 4 + %144 = sub i64 %64, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %145 = phi i64 [ 0, %exit__10 ], [ %156, %exiting__11 ] + %146 = icmp sle i64 %145, %144 + br i1 %146, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %147 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 %145) + %148 = bitcast i8* %147 to { { double, double }*, %Array* }** + %149 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %148, align 8 + %150 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %149, i32 0, i32 0 + %151 = load { double, double }*, { double, double }** %150, align 8 + %152 = bitcast { double, double }* %151 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %152, i32 1) + %153 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %149, i32 0, i32 1 + %154 = load %Array*, %Array** %153, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %154, i32 1) + %155 = bitcast { { double, double }*, %Array* }* %149 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %155, i32 1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %156 = add i64 %145, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %JWInputStates, i32 1) + %157 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %158 = bitcast %Tuple* %157 to { double, 
double }* + %159 = getelementptr inbounds { double, double }, { double, double }* %158, i32 0, i32 0 + %160 = getelementptr inbounds { double, double }, { double, double }* %158, i32 0, i32 1 + store double %theta1, double* %159, align 8 + store double 0.000000e+00, double* %160, align 8 + %161 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %162 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %161, i64 0) + %163 = bitcast i8* %162 to i64* + %164 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %161, i64 1) + %165 = bitcast i8* %164 to i64* + store i64 2, i64* %163, align 4 + store i64 0, i64* %165, align 4 + %166 = call { { double, double }*, %Array* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerInputState__body({ double, double }* %158, %Array* %161) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %157, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %161, i32 -1) + %167 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %168 = bitcast %Tuple* %167 to { double, double }* + %169 = getelementptr inbounds { double, double }, { double, double }* %168, i32 0, i32 0 + %170 = getelementptr inbounds { double, double }, { double, double }* %168, i32 0, i32 1 + store double %theta2, double* %169, align 8 + store double 0.000000e+00, double* %170, align 8 + %171 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %172 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %171, i64 0) + %173 = bitcast i8* %172 to i64* + %174 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %171, i64 1) + %175 = bitcast i8* %174 to i64* + store i64 3, i64* %173, align 4 + store i64 1, i64* %175, align 4 + %176 = call { { double, double }*, %Array* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerInputState__body({ double, double }* %168, %Array* %171) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %167, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %171, i32 -1) + %177 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %178 = bitcast %Tuple* %177 to { double, double }* + %179 = getelementptr inbounds { double, double }, { double, double }* %178, i32 0, i32 0 + %180 = getelementptr inbounds { double, double }, { double, double }* %178, i32 0, i32 1 + store double %theta3, double* %179, align 8 + store double 0.000000e+00, double* %180, align 8 + %181 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %182 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %181, i64 0) + %183 = bitcast i8* %182 to i64* + %184 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %181, i64 1) + %185 = bitcast i8* %184 to i64* + %186 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %181, i64 2) + %187 = bitcast i8* %186 to i64* + %188 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %181, i64 3) + %189 = bitcast i8* %188 to i64* + store i64 2, i64* %183, align 4 + store i64 3, i64* %185, align 4 + store i64 1, i64* %187, align 4 + store i64 0, i64* %189, align 4 + %190 = call { { double, double }*, %Array* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerInputState__body({ double, double }* %178, %Array* %181) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %177, i32 -1) + 
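+; NOTE (annotation): the three tuples built above carry the variational
+; coefficients (theta1, 0.0), (theta2, 0.0), (theta3, 0.0) with their
+; spin-orbital index arrays; next, JWInputStates[0] is copied and all four
+; entries are packed into the trial-state array.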
call void @__quantum__rt__array_update_reference_count(%Array* %181, i32 -1) + %191 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 0) + %192 = bitcast i8* %191 to { { double, double }*, %Array* }** + %193 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %192, align 8 + %194 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %193, i32 0, i32 0 + %195 = load { double, double }*, { double, double }** %194, align 8 + %196 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %193, i32 0, i32 1 + %197 = load %Array*, %Array** %196, align 8 + %198 = bitcast { double, double }* %195 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %198, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %197, i32 1) + %199 = bitcast { { double, double }*, %Array* }* %193 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %199, i32 1) + %200 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %201 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 0) + %202 = bitcast i8* %201 to { { double, double }*, %Array* }** + %203 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 1) + %204 = bitcast i8* %203 to { { double, double }*, %Array* }** + %205 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 2) + %206 = bitcast i8* %205 to { { double, double }*, %Array* }** + %207 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 3) + %208 = bitcast i8* %207 to { { double, double }*, %Array* }** + store { { double, double }*, %Array* }* %166, { { double, double }*, %Array* }** %202, align 8 + store { { double, double }*, %Array* }* %176, { { double, double }*, %Array* }** %204, align 8 + store { { double, double }*, %Array* }* %190, { { double, double }*, %Array* }** %206, align 8 + store { { double, double }*, %Array* }* %193, { { double, double }*, %Array* }** %208, align 8 + %209 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %inputStateParam = bitcast %Tuple* %209 to { i64, %Array* }* + %210 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputStateParam, i32 0, i32 0 + %211 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputStateParam, i32 0, i32 1 + store i64 %stateType, i64* %210, align 4 + store %Array* %200, %Array** %211, align 8 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %212 = phi i64 [ 0, %exit__11 ], [ %223, %exiting__12 ] + %213 = icmp sle i64 %212, 3 + br i1 %213, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 %212) + %215 = bitcast i8* %214 to { { double, double }*, %Array* }** + %216 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %215, align 8 + %217 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %216, i32 0, i32 0 + %218 = load { double, double }*, { double, double }** %217, align 8 + %219 = bitcast { double, double }* %218 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %219, i32 1) + %220 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %216, i32 0, i32 1 + %221 = load %Array*, %Array** %220, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %221, i32 1) + %222 = bitcast { { double, double }*, %Array* }* %216 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %222, i32 1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %223 = add i64 %212, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %200, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %209, i32 1) + %JWEncodedDataParam = call { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerEncodingData__body(i64 %nSpinOrbitals, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, { i64, %Array* }* %inputState, double %energyOffset) + %224 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedDataParam, i32 0, i32 1 + %225 = load { %Array*, %Array*, %Array*, %Array* }*, { %Array*, %Array*, %Array*, %Array* }** %224, align 8 + %226 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %225, i32 0, i32 0 + %227 = load %Array*, %Array** %226, align 8 + %228 = call i64 @__quantum__rt__array_get_size_1d(%Array* %227) + %229 = sub i64 %228, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %230 = phi i64 [ 0, %exit__12 ], [ %240, %exiting__13 ] + %231 = icmp sle i64 %230, %229 + br i1 %231, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %232 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %227, i64 %230) + %233 = bitcast i8* %232 to { %Array*, %Array* }** + %234 = load { %Array*, %Array* }*, { %Array*, %Array* }** %233, align 8 + %235 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %234, i32 0, i32 0 + %236 = load %Array*, %Array** %235, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %236, i32 1) + %237 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %234, i32 0, i32 1 + %238 = load %Array*, %Array** %237, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %238, i32 1) + %239 = bitcast { %Array*, %Array* }* %234 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %239, i32 1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %240 = add i64 %230, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_alias_count(%Array* %227, i32 1) + %241 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %225, i32 0, i32 1 + %242 = load %Array*, %Array** %241, align 8 + %243 = call i64 @__quantum__rt__array_get_size_1d(%Array* %242) + %244 = sub i64 %243, 1 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %245 = phi i64 [ 0, %exit__13 ], [ %255, %exiting__14 ] + %246 = icmp sle i64 %245, %244 + br i1 %246, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %247 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %242, i64 %245) + %248 = bitcast i8* %247 to { %Array*, %Array* }** + %249 = load { %Array*, %Array* }*, { %Array*, %Array* }** %248, align 8 + %250 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %249, i32 0, i32 0 + %251 = load %Array*, %Array** %250, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %251, i32 1) + %252 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %249, i32 0, i32 1 + %253 = load %Array*, %Array** %252, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %253, i32 1) + %254 = bitcast { %Array*, %Array* }* %249 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %254, i32 1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %255 = add i64 %245, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_alias_count(%Array* %242, i32 1) + %256 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %225, i32 0, i32 2 + %257 = load %Array*, %Array** %256, align 8 + %258 = call i64 @__quantum__rt__array_get_size_1d(%Array* %257) + %259 = sub i64 %258, 1 + br label %header__15 + +header__15: ; preds = %exiting__15, %exit__14 + %260 = phi i64 [ 0, %exit__14 ], [ %270, %exiting__15 ] + %261 = icmp sle i64 %260, %259 + br i1 %261, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %262 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %257, i64 %260) + %263 = bitcast i8* %262 to { %Array*, %Array* }** + %264 = load { %Array*, %Array* }*, { %Array*, %Array* }** %263, align 8 + %265 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %264, i32 0, i32 0 + %266 = load %Array*, %Array** %265, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %266, i32 1) + %267 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %264, i32 0, i32 1 + %268 = load %Array*, %Array** %267, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %268, i32 1) + %269 = bitcast { %Array*, %Array* }* %264 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %269, i32 1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %270 = add i64 %260, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_alias_count(%Array* %257, i32 1) + %271 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %225, i32 0, i32 3 + %272 = load %Array*, %Array** %271, align 8 + %273 = call i64 @__quantum__rt__array_get_size_1d(%Array* %272) + %274 = sub i64 %273, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %275 = phi i64 [ 0, %exit__15 ], [ %285, %exiting__16 ] + %276 = icmp sle i64 %275, %274 + br i1 %276, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %277 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %272, i64 %275) + %278 = bitcast i8* %277 to { %Array*, %Array* }** + %279 = load { %Array*, %Array* }*, { %Array*, %Array* }** %278, align 8 + %280 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %279, i32 0, i32 0 + %281 = load %Array*, %Array** %280, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %281, i32 1) + %282 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %279, i32 0, i32 1 + %283 = load %Array*, %Array** %282, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %283, i32 1) + %284 = bitcast { %Array*, %Array* }* %279 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %284, i32 1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %285 = add i64 %275, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call 
void @__quantum__rt__array_update_alias_count(%Array* %272, i32 1) + %286 = bitcast { %Array*, %Array*, %Array*, %Array* }* %225 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %286, i32 1) + %287 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedDataParam, i32 0, i32 2 + %288 = load { i64, %Array* }*, { i64, %Array* }** %287, align 8 + %289 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %288, i32 0, i32 1 + %290 = load %Array*, %Array** %289, align 8 + %291 = call i64 @__quantum__rt__array_get_size_1d(%Array* %290) + %292 = sub i64 %291, 1 + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %293 = phi i64 [ 0, %exit__16 ], [ %304, %exiting__17 ] + %294 = icmp sle i64 %293, %292 + br i1 %294, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %295 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %290, i64 %293) + %296 = bitcast i8* %295 to { { double, double }*, %Array* }** + %297 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %296, align 8 + %298 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %297, i32 0, i32 0 + %299 = load { double, double }*, { double, double }** %298, align 8 + %300 = bitcast { double, double }* %299 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %300, i32 1) + %301 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %297, i32 0, i32 1 + %302 = load %Array*, %Array** %301, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %302, i32 1) + %303 = bitcast { { double, double }*, %Array* }* %297 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %303, i32 1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %304 = add i64 %293, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %290, i32 1) + %305 = bitcast { i64, %Array* }* %288 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %305, i32 1) + %306 = bitcast { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedDataParam to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %306, i32 1) + %307 = call double @Microsoft__Quantum__Chemistry__JordanWigner__VQE__EstimateEnergy__body({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedDataParam, i64 %nSamples) + %308 = sub i64 %3, 1 + br label %header__18 + +header__18: ; preds = %exiting__18, %exit__17 + %309 = phi i64 [ 0, %exit__17 ], [ %319, %exiting__18 ] + %310 = icmp sle i64 %309, %308 + br i1 %310, label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %311 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %309) + %312 = bitcast i8* %311 to { %Array*, %Array* }** + %313 = load { %Array*, %Array* }*, { %Array*, %Array* }** %312, align 8 + %314 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %313, i32 0, i32 0 + %315 = load %Array*, %Array** %314, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %315, i32 -1) + %316 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %313, i32 0, i32 1 + %317 = load %Array*, %Array** %316, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* 
%317, i32 -1) + %318 = bitcast { %Array*, %Array* }* %313 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %318, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = %body__18 + %319 = add i64 %309, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %320 = sub i64 %18, 1 + br label %header__19 + +header__19: ; preds = %exiting__19, %exit__18 + %321 = phi i64 [ 0, %exit__18 ], [ %331, %exiting__19 ] + %322 = icmp sle i64 %321, %320 + br i1 %322, label %body__19, label %exit__19 + +body__19: ; preds = %header__19 + %323 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %321) + %324 = bitcast i8* %323 to { %Array*, %Array* }** + %325 = load { %Array*, %Array* }*, { %Array*, %Array* }** %324, align 8 + %326 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %325, i32 0, i32 0 + %327 = load %Array*, %Array** %326, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %327, i32 -1) + %328 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %325, i32 0, i32 1 + %329 = load %Array*, %Array** %328, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %329, i32 -1) + %330 = bitcast { %Array*, %Array* }* %325 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %330, i32 -1) + br label %exiting__19 + +exiting__19: ; preds = %body__19 + %331 = add i64 %321, 1 + br label %header__19 + +exit__19: ; preds = %header__19 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %332 = sub i64 %33, 1 + br label %header__20 + +header__20: ; preds = %exiting__20, %exit__19 + %333 = phi i64 [ 0, %exit__19 ], [ %343, %exiting__20 ] + %334 = icmp sle i64 %333, %332 + br i1 %334, label %body__20, label %exit__20 + +body__20: ; preds = %header__20 + %335 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %333) + %336 = bitcast i8* %335 to { %Array*, %Array* }** + %337 = load { %Array*, %Array* }*, { %Array*, %Array* }** %336, align 8 + %338 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %337, i32 0, i32 0 + %339 = load %Array*, %Array** %338, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %339, i32 -1) + %340 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %337, i32 0, i32 1 + %341 = load %Array*, %Array** %340, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %341, i32 -1) + %342 = bitcast { %Array*, %Array* }* %337 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %342, i32 -1) + br label %exiting__20 + +exiting__20: ; preds = %body__20 + %343 = add i64 %333, 1 + br label %header__20 + +exit__20: ; preds = %header__20 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %344 = sub i64 %48, 1 + br label %header__21 + +header__21: ; preds = %exiting__21, %exit__20 + %345 = phi i64 [ 0, %exit__20 ], [ %355, %exiting__21 ] + %346 = icmp sle i64 %345, %344 + br i1 %346, label %body__21, label %exit__21 + +body__21: ; preds = %header__21 + %347 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %345) + %348 = bitcast i8* %347 to { %Array*, %Array* }** + %349 = load { %Array*, %Array* }*, { %Array*, %Array* }** %348, align 8 + %350 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %349, i32 0, i32 0 + %351 = load %Array*, %Array** %350, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* 
%351, i32 -1) + %352 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %349, i32 0, i32 1 + %353 = load %Array*, %Array** %352, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %353, i32 -1) + %354 = bitcast { %Array*, %Array* }* %349 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %354, i32 -1) + br label %exiting__21 + +exiting__21: ; preds = %body__21 + %355 = add i64 %345, 1 + br label %header__21 + +exit__21: ; preds = %header__21 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %356 = sub i64 %64, 1 + br label %header__22 + +header__22: ; preds = %exiting__22, %exit__21 + %357 = phi i64 [ 0, %exit__21 ], [ %368, %exiting__22 ] + %358 = icmp sle i64 %357, %356 + br i1 %358, label %body__22, label %exit__22 + +body__22: ; preds = %header__22 + %359 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 %357) + %360 = bitcast i8* %359 to { { double, double }*, %Array* }** + %361 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %360, align 8 + %362 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %361, i32 0, i32 0 + %363 = load { double, double }*, { double, double }** %362, align 8 + %364 = bitcast { double, double }* %363 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %364, i32 -1) + %365 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %361, i32 0, i32 1 + %366 = load %Array*, %Array** %365, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %366, i32 -1) + %367 = bitcast { { double, double }*, %Array* }* %361 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %367, i32 -1) + br label %exiting__22 + +exiting__22: ; preds = %body__22 + %368 = add i64 %357, 1 + br label %header__22 + +exit__22: ; preds = %header__22 + call void @__quantum__rt__array_update_alias_count(%Array* %JWInputStates, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 -1) + %369 = sub i64 %3, 1 + br label %header__23 + +header__23: ; preds = %exiting__23, %exit__22 + %370 = phi i64 [ 0, %exit__22 ], [ %380, %exiting__23 ] + %371 = icmp sle i64 %370, %369 + br i1 %371, label %body__23, label %exit__23 + +body__23: ; preds = %header__23 + %372 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %370) + %373 = bitcast i8* %372 to { %Array*, %Array* }** + %374 = load { %Array*, %Array* }*, { %Array*, %Array* }** %373, align 8 + %375 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %374, i32 0, i32 0 + %376 = load %Array*, %Array** %375, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %376, i32 -1) + %377 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %374, i32 0, i32 1 + %378 = load %Array*, %Array** %377, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %378, i32 -1) + %379 = bitcast { %Array*, %Array* }* %374 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %379, i32 -1) + br label %exiting__23 + +exiting__23: ; preds = %body__23 + %380 = add i64 %370, 1 + br label %header__23 + +exit__23: ; preds = %header__23 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %381 = sub i64 %18, 1 + br label %header__24 + 
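+; NOTE (annotation): EstimateEnergy has returned at this point, so loops
+; header__18..header__29 release the alias counts in mirror order, undoing
+; every registration pass taken on the input data above.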
+header__24: ; preds = %exiting__24, %exit__23 + %382 = phi i64 [ 0, %exit__23 ], [ %392, %exiting__24 ] + %383 = icmp sle i64 %382, %381 + br i1 %383, label %body__24, label %exit__24 + +body__24: ; preds = %header__24 + %384 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %382) + %385 = bitcast i8* %384 to { %Array*, %Array* }** + %386 = load { %Array*, %Array* }*, { %Array*, %Array* }** %385, align 8 + %387 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %386, i32 0, i32 0 + %388 = load %Array*, %Array** %387, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %388, i32 -1) + %389 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %386, i32 0, i32 1 + %390 = load %Array*, %Array** %389, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %390, i32 -1) + %391 = bitcast { %Array*, %Array* }* %386 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %391, i32 -1) + br label %exiting__24 + +exiting__24: ; preds = %body__24 + %392 = add i64 %382, 1 + br label %header__24 + +exit__24: ; preds = %header__24 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %393 = sub i64 %33, 1 + br label %header__25 + +header__25: ; preds = %exiting__25, %exit__24 + %394 = phi i64 [ 0, %exit__24 ], [ %404, %exiting__25 ] + %395 = icmp sle i64 %394, %393 + br i1 %395, label %body__25, label %exit__25 + +body__25: ; preds = %header__25 + %396 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %394) + %397 = bitcast i8* %396 to { %Array*, %Array* }** + %398 = load { %Array*, %Array* }*, { %Array*, %Array* }** %397, align 8 + %399 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %398, i32 0, i32 0 + %400 = load %Array*, %Array** %399, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %400, i32 -1) + %401 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %398, i32 0, i32 1 + %402 = load %Array*, %Array** %401, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %402, i32 -1) + %403 = bitcast { %Array*, %Array* }* %398 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %403, i32 -1) + br label %exiting__25 + +exiting__25: ; preds = %body__25 + %404 = add i64 %394, 1 + br label %header__25 + +exit__25: ; preds = %header__25 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %405 = sub i64 %48, 1 + br label %header__26 + +header__26: ; preds = %exiting__26, %exit__25 + %406 = phi i64 [ 0, %exit__25 ], [ %416, %exiting__26 ] + %407 = icmp sle i64 %406, %405 + br i1 %407, label %body__26, label %exit__26 + +body__26: ; preds = %header__26 + %408 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %406) + %409 = bitcast i8* %408 to { %Array*, %Array* }** + %410 = load { %Array*, %Array* }*, { %Array*, %Array* }** %409, align 8 + %411 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %410, i32 0, i32 0 + %412 = load %Array*, %Array** %411, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %412, i32 -1) + %413 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %410, i32 0, i32 1 + %414 = load %Array*, %Array** %413, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %414, i32 -1) + %415 = bitcast { %Array*, %Array* }* %410 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %415, i32 -1) + br label %exiting__26 + +exiting__26: ; preds = 
%body__26 + %416 = add i64 %406, 1 + br label %header__26 + +exit__26: ; preds = %header__26 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %417 = sub i64 %64, 1 + br label %header__27 + +header__27: ; preds = %exiting__27, %exit__26 + %418 = phi i64 [ 0, %exit__26 ], [ %429, %exiting__27 ] + %419 = icmp sle i64 %418, %417 + br i1 %419, label %body__27, label %exit__27 + +body__27: ; preds = %header__27 + %420 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 %418) + %421 = bitcast i8* %420 to { { double, double }*, %Array* }** + %422 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %421, align 8 + %423 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %422, i32 0, i32 0 + %424 = load { double, double }*, { double, double }** %423, align 8 + %425 = bitcast { double, double }* %424 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %425, i32 -1) + %426 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %422, i32 0, i32 1 + %427 = load %Array*, %Array** %426, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %427, i32 -1) + %428 = bitcast { { double, double }*, %Array* }* %422 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %428, i32 -1) + br label %exiting__27 + +exiting__27: ; preds = %body__27 + %429 = add i64 %418, 1 + br label %header__27 + +exit__27: ; preds = %header__27 + call void @__quantum__rt__array_update_alias_count(%Array* %JWInputStates, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 -1) + %430 = sub i64 %64, 1 + br label %header__28 + +header__28: ; preds = %exiting__28, %exit__27 + %431 = phi i64 [ 0, %exit__27 ], [ %442, %exiting__28 ] + %432 = icmp sle i64 %431, %430 + br i1 %432, label %body__28, label %exit__28 + +body__28: ; preds = %header__28 + %433 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 %431) + %434 = bitcast i8* %433 to { { double, double }*, %Array* }** + %435 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %434, align 8 + %436 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %435, i32 0, i32 0 + %437 = load { double, double }*, { double, double }** %436, align 8 + %438 = bitcast { double, double }* %437 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %438, i32 -1) + %439 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %435, i32 0, i32 1 + %440 = load %Array*, %Array** %439, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %440, i32 -1) + %441 = bitcast { { double, double }*, %Array* }* %435 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %441, i32 -1) + br label %exiting__28 + +exiting__28: ; preds = %body__28 + %442 = add i64 %431, 1 + br label %header__28 + +exit__28: ; preds = %header__28 + call void @__quantum__rt__array_update_alias_count(%Array* %JWInputStates, i32 -1) + br label %header__29 + +header__29: ; preds = %exiting__29, %exit__28 + %443 = phi i64 [ 0, %exit__28 ], [ %454, %exiting__29 ] + %444 = icmp sle i64 %443, 3 + br i1 %444, label %body__29, label %exit__29 + +body__29: ; preds = %header__29 + %445 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 %443) + %446 = 
bitcast i8* %445 to { { double, double }*, %Array* }** + %447 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %446, align 8 + %448 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %447, i32 0, i32 0 + %449 = load { double, double }*, { double, double }** %448, align 8 + %450 = bitcast { double, double }* %449 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %450, i32 -1) + %451 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %447, i32 0, i32 1 + %452 = load %Array*, %Array** %451, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %452, i32 -1) + %453 = bitcast { { double, double }*, %Array* }* %447 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %453, i32 -1) + br label %exiting__29 + +exiting__29: ; preds = %body__29 + %454 = add i64 %443, 1 + br label %header__29 + +exit__29: ; preds = %header__29 + call void @__quantum__rt__array_update_alias_count(%Array* %200, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %209, i32 -1) + %455 = sub i64 %228, 1 + br label %header__30 + +header__30: ; preds = %exiting__30, %exit__29 + %456 = phi i64 [ 0, %exit__29 ], [ %466, %exiting__30 ] + %457 = icmp sle i64 %456, %455 + br i1 %457, label %body__30, label %exit__30 + +body__30: ; preds = %header__30 + %458 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %227, i64 %456) + %459 = bitcast i8* %458 to { %Array*, %Array* }** + %460 = load { %Array*, %Array* }*, { %Array*, %Array* }** %459, align 8 + %461 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %460, i32 0, i32 0 + %462 = load %Array*, %Array** %461, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %462, i32 -1) + %463 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %460, i32 0, i32 1 + %464 = load %Array*, %Array** %463, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %464, i32 -1) + %465 = bitcast { %Array*, %Array* }* %460 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %465, i32 -1) + br label %exiting__30 + +exiting__30: ; preds = %body__30 + %466 = add i64 %456, 1 + br label %header__30 + +exit__30: ; preds = %header__30 + call void @__quantum__rt__array_update_alias_count(%Array* %227, i32 -1) + %467 = sub i64 %243, 1 + br label %header__31 + +header__31: ; preds = %exiting__31, %exit__30 + %468 = phi i64 [ 0, %exit__30 ], [ %478, %exiting__31 ] + %469 = icmp sle i64 %468, %467 + br i1 %469, label %body__31, label %exit__31 + +body__31: ; preds = %header__31 + %470 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %242, i64 %468) + %471 = bitcast i8* %470 to { %Array*, %Array* }** + %472 = load { %Array*, %Array* }*, { %Array*, %Array* }** %471, align 8 + %473 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %472, i32 0, i32 0 + %474 = load %Array*, %Array** %473, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %474, i32 -1) + %475 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %472, i32 0, i32 1 + %476 = load %Array*, %Array** %475, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %476, i32 -1) + %477 = bitcast { %Array*, %Array* }* %472 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %477, i32 -1) + br label %exiting__31 + +exiting__31: ; preds = %body__31 + %478 = add i64 %468, 1 + br label %header__31 + 
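+; NOTE (annotation): loops header__30..header__34 release the alias counts
+; taken on JWEncodedDataParam, and header__35..header__40 then drop the
+; reference counts so the runtime can free the temporary trial-state data
+; before the energy is returned.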
+exit__31: ; preds = %header__31 + call void @__quantum__rt__array_update_alias_count(%Array* %242, i32 -1) + %479 = sub i64 %258, 1 + br label %header__32 + +header__32: ; preds = %exiting__32, %exit__31 + %480 = phi i64 [ 0, %exit__31 ], [ %490, %exiting__32 ] + %481 = icmp sle i64 %480, %479 + br i1 %481, label %body__32, label %exit__32 + +body__32: ; preds = %header__32 + %482 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %257, i64 %480) + %483 = bitcast i8* %482 to { %Array*, %Array* }** + %484 = load { %Array*, %Array* }*, { %Array*, %Array* }** %483, align 8 + %485 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %484, i32 0, i32 0 + %486 = load %Array*, %Array** %485, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %486, i32 -1) + %487 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %484, i32 0, i32 1 + %488 = load %Array*, %Array** %487, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %488, i32 -1) + %489 = bitcast { %Array*, %Array* }* %484 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %489, i32 -1) + br label %exiting__32 + +exiting__32: ; preds = %body__32 + %490 = add i64 %480, 1 + br label %header__32 + +exit__32: ; preds = %header__32 + call void @__quantum__rt__array_update_alias_count(%Array* %257, i32 -1) + %491 = sub i64 %273, 1 + br label %header__33 + +header__33: ; preds = %exiting__33, %exit__32 + %492 = phi i64 [ 0, %exit__32 ], [ %502, %exiting__33 ] + %493 = icmp sle i64 %492, %491 + br i1 %493, label %body__33, label %exit__33 + +body__33: ; preds = %header__33 + %494 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %272, i64 %492) + %495 = bitcast i8* %494 to { %Array*, %Array* }** + %496 = load { %Array*, %Array* }*, { %Array*, %Array* }** %495, align 8 + %497 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %496, i32 0, i32 0 + %498 = load %Array*, %Array** %497, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %498, i32 -1) + %499 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %496, i32 0, i32 1 + %500 = load %Array*, %Array** %499, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %500, i32 -1) + %501 = bitcast { %Array*, %Array* }* %496 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %501, i32 -1) + br label %exiting__33 + +exiting__33: ; preds = %body__33 + %502 = add i64 %492, 1 + br label %header__33 + +exit__33: ; preds = %header__33 + call void @__quantum__rt__array_update_alias_count(%Array* %272, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %286, i32 -1) + %503 = sub i64 %291, 1 + br label %header__34 + +header__34: ; preds = %exiting__34, %exit__33 + %504 = phi i64 [ 0, %exit__33 ], [ %515, %exiting__34 ] + %505 = icmp sle i64 %504, %503 + br i1 %505, label %body__34, label %exit__34 + +body__34: ; preds = %header__34 + %506 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %290, i64 %504) + %507 = bitcast i8* %506 to { { double, double }*, %Array* }** + %508 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %507, align 8 + %509 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %508, i32 0, i32 0 + %510 = load { double, double }*, { double, double }** %509, align 8 + %511 = bitcast { double, double }* %510 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %511, i32 -1) + %512 = 
getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %508, i32 0, i32 1 + %513 = load %Array*, %Array** %512, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %513, i32 -1) + %514 = bitcast { { double, double }*, %Array* }* %508 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %514, i32 -1) + br label %exiting__34 + +exiting__34: ; preds = %body__34 + %515 = add i64 %504, 1 + br label %header__34 + +exit__34: ; preds = %header__34 + call void @__quantum__rt__array_update_alias_count(%Array* %290, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %305, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %306, i32 -1) + br label %header__35 + +header__35: ; preds = %exiting__35, %exit__34 + %516 = phi i64 [ 0, %exit__34 ], [ %527, %exiting__35 ] + %517 = icmp sle i64 %516, 3 + br i1 %517, label %body__35, label %exit__35 + +body__35: ; preds = %header__35 + %518 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 %516) + %519 = bitcast i8* %518 to { { double, double }*, %Array* }** + %520 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %519, align 8 + %521 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %520, i32 0, i32 0 + %522 = load { double, double }*, { double, double }** %521, align 8 + %523 = bitcast { double, double }* %522 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %523, i32 -1) + %524 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %520, i32 0, i32 1 + %525 = load %Array*, %Array** %524, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %525, i32 -1) + %526 = bitcast { { double, double }*, %Array* }* %520 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %526, i32 -1) + br label %exiting__35 + +exiting__35: ; preds = %body__35 + %527 = add i64 %516, 1 + br label %header__35 + +exit__35: ; preds = %header__35 + call void @__quantum__rt__array_update_reference_count(%Array* %200, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %209, i32 -1) + %528 = sub i64 %228, 1 + br label %header__36 + +header__36: ; preds = %exiting__36, %exit__35 + %529 = phi i64 [ 0, %exit__35 ], [ %539, %exiting__36 ] + %530 = icmp sle i64 %529, %528 + br i1 %530, label %body__36, label %exit__36 + +body__36: ; preds = %header__36 + %531 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %227, i64 %529) + %532 = bitcast i8* %531 to { %Array*, %Array* }** + %533 = load { %Array*, %Array* }*, { %Array*, %Array* }** %532, align 8 + %534 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %533, i32 0, i32 0 + %535 = load %Array*, %Array** %534, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %535, i32 -1) + %536 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %533, i32 0, i32 1 + %537 = load %Array*, %Array** %536, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %537, i32 -1) + %538 = bitcast { %Array*, %Array* }* %533 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %538, i32 -1) + br label %exiting__36 + +exiting__36: ; preds = %body__36 + %539 = add i64 %529, 1 + br label %header__36 + +exit__36: ; preds = %header__36 + call void @__quantum__rt__array_update_reference_count(%Array* %227, i32 -1) + %540 = sub i64 %243, 1 + br label 
%header__37 + +header__37: ; preds = %exiting__37, %exit__36 + %541 = phi i64 [ 0, %exit__36 ], [ %551, %exiting__37 ] + %542 = icmp sle i64 %541, %540 + br i1 %542, label %body__37, label %exit__37 + +body__37: ; preds = %header__37 + %543 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %242, i64 %541) + %544 = bitcast i8* %543 to { %Array*, %Array* }** + %545 = load { %Array*, %Array* }*, { %Array*, %Array* }** %544, align 8 + %546 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %545, i32 0, i32 0 + %547 = load %Array*, %Array** %546, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %547, i32 -1) + %548 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %545, i32 0, i32 1 + %549 = load %Array*, %Array** %548, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %549, i32 -1) + %550 = bitcast { %Array*, %Array* }* %545 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %550, i32 -1) + br label %exiting__37 + +exiting__37: ; preds = %body__37 + %551 = add i64 %541, 1 + br label %header__37 + +exit__37: ; preds = %header__37 + call void @__quantum__rt__array_update_reference_count(%Array* %242, i32 -1) + %552 = sub i64 %258, 1 + br label %header__38 + +header__38: ; preds = %exiting__38, %exit__37 + %553 = phi i64 [ 0, %exit__37 ], [ %563, %exiting__38 ] + %554 = icmp sle i64 %553, %552 + br i1 %554, label %body__38, label %exit__38 + +body__38: ; preds = %header__38 + %555 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %257, i64 %553) + %556 = bitcast i8* %555 to { %Array*, %Array* }** + %557 = load { %Array*, %Array* }*, { %Array*, %Array* }** %556, align 8 + %558 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %557, i32 0, i32 0 + %559 = load %Array*, %Array** %558, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %559, i32 -1) + %560 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %557, i32 0, i32 1 + %561 = load %Array*, %Array** %560, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %561, i32 -1) + %562 = bitcast { %Array*, %Array* }* %557 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %562, i32 -1) + br label %exiting__38 + +exiting__38: ; preds = %body__38 + %563 = add i64 %553, 1 + br label %header__38 + +exit__38: ; preds = %header__38 + call void @__quantum__rt__array_update_reference_count(%Array* %257, i32 -1) + %564 = sub i64 %273, 1 + br label %header__39 + +header__39: ; preds = %exiting__39, %exit__38 + %565 = phi i64 [ 0, %exit__38 ], [ %575, %exiting__39 ] + %566 = icmp sle i64 %565, %564 + br i1 %566, label %body__39, label %exit__39 + +body__39: ; preds = %header__39 + %567 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %272, i64 %565) + %568 = bitcast i8* %567 to { %Array*, %Array* }** + %569 = load { %Array*, %Array* }*, { %Array*, %Array* }** %568, align 8 + %570 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %569, i32 0, i32 0 + %571 = load %Array*, %Array** %570, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %571, i32 -1) + %572 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %569, i32 0, i32 1 + %573 = load %Array*, %Array** %572, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %573, i32 -1) + %574 = bitcast { %Array*, %Array* }* %569 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* 
%574, i32 -1) + br label %exiting__39 + +exiting__39: ; preds = %body__39 + %575 = add i64 %565, 1 + br label %header__39 + +exit__39: ; preds = %header__39 + call void @__quantum__rt__array_update_reference_count(%Array* %272, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %286, i32 -1) + %576 = sub i64 %291, 1 + br label %header__40 + +header__40: ; preds = %exiting__40, %exit__39 + %577 = phi i64 [ 0, %exit__39 ], [ %588, %exiting__40 ] + %578 = icmp sle i64 %577, %576 + br i1 %578, label %body__40, label %exit__40 + +body__40: ; preds = %header__40 + %579 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %290, i64 %577) + %580 = bitcast i8* %579 to { { double, double }*, %Array* }** + %581 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %580, align 8 + %582 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %581, i32 0, i32 0 + %583 = load { double, double }*, { double, double }** %582, align 8 + %584 = bitcast { double, double }* %583 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %584, i32 -1) + %585 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %581, i32 0, i32 1 + %586 = load %Array*, %Array** %585, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %586, i32 -1) + %587 = bitcast { { double, double }*, %Array* }* %581 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %587, i32 -1) + br label %exiting__40 + +exiting__40: ; preds = %body__40 + %588 = add i64 %577, 1 + br label %header__40 + +exit__40: ; preds = %header__40 + call void @__quantum__rt__array_update_reference_count(%Array* %290, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %305, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %306, i32 -1) + ret double %307 +} + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) + +declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) + +define internal { { double, double }*, %Array* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerInputState__body({ double, double }* %0, %Array* %__Item3__) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__Item3__, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }*, %Array* }* getelementptr ({ { double, double }*, %Array* }, { { double, double }*, %Array* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { { double, double }*, %Array* }* + %3 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %2, i32 0, i32 1 + store { double, double }* %0, { double, double }** %3, align 8 + store %Array* %__Item3__, %Array** %4, align 8 + %5 = bitcast { double, double }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__Item3__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__Item3__, i32 -1) + ret { { double, double }*, %Array* }* %2 +} + +declare %Tuple* @__quantum__rt__tuple_create(i64) + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare void 
@__quantum__rt__tuple_update_reference_count(%Tuple*, i32) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) + +define internal { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerEncodingData__body(i64 %__Item1__, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, { i64, %Array* }* %0, double %__Item5__) { +entry: + %1 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %2) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %5) + %8 = bitcast i8* %7 to { %Array*, %Array* }** + %9 = load { %Array*, %Array* }*, { %Array*, %Array* }** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array*, %Array* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call i64 @__quantum__rt__array_get_size_1d(%Array* %17) + %19 = sub i64 %18, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %20) + %23 = bitcast i8* %22 to { %Array*, %Array* }** + %24 = load { %Array*, %Array* }*, { %Array*, %Array* }** %23, align 8 + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { %Array*, %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %31 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 2 + %32 = load %Array*, %Array** %31, align 8 + %33 = call i64 @__quantum__rt__array_get_size_1d(%Array* %32) + %34 = 
sub i64 %33, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %45, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %35) + %38 = bitcast i8* %37 to { %Array*, %Array* }** + %39 = load { %Array*, %Array* }*, { %Array*, %Array* }** %38, align 8 + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 0 + %41 = load %Array*, %Array** %40, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1) + %42 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 1 + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + %44 = bitcast { %Array*, %Array* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %44, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %45 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %46 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 3 + %47 = load %Array*, %Array** %46, align 8 + %48 = call i64 @__quantum__rt__array_get_size_1d(%Array* %47) + %49 = sub i64 %48, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %50 = phi i64 [ 0, %exit__3 ], [ %60, %exiting__4 ] + %51 = icmp sle i64 %50, %49 + br i1 %51, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %50) + %53 = bitcast i8* %52 to { %Array*, %Array* }** + %54 = load { %Array*, %Array* }*, { %Array*, %Array* }** %53, align 8 + %55 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 0 + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + %57 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 1 + %58 = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 1) + %59 = bitcast { %Array*, %Array* }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %59, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %60 = add i64 %50, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + %61 = bitcast { %Array*, %Array*, %Array*, %Array* }* %__Item2__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %62 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* getelementptr ({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* null, i32 1) to i64)) + %63 = bitcast %Tuple* %62 to { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* + %64 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %63, i32 0, i32 0 + %65 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, 
{ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %63, i32 0, i32 1 + %66 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %63, i32 0, i32 2 + %67 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %63, i32 0, i32 3 + store i64 %__Item1__, i64* %64, align 4 + store { %Array*, %Array*, %Array*, %Array* }* %__Item2__, { %Array*, %Array*, %Array*, %Array* }** %65, align 8 + store { i64, %Array* }* %0, { i64, %Array* }** %66, align 8 + store double %__Item5__, double* %67, align 8 + %68 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 0 + %69 = load %Array*, %Array** %68, align 8 + %70 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 1 + %71 = load %Array*, %Array** %70, align 8 + %72 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 2 + %73 = load %Array*, %Array** %72, align 8 + %74 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 3 + %75 = load %Array*, %Array** %74, align 8 + %76 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %0, i32 0, i32 1 + %77 = load %Array*, %Array** %76, align 8 + %78 = call i64 @__quantum__rt__array_get_size_1d(%Array* %69) + %79 = sub i64 %78, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %80 = phi i64 [ 0, %exit__4 ], [ %90, %exiting__5 ] + %81 = icmp sle i64 %80, %79 + br i1 %81, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 %80) + %83 = bitcast i8* %82 to { %Array*, %Array* }** + %84 = load { %Array*, %Array* }*, { %Array*, %Array* }** %83, align 8 + %85 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %84, i32 0, i32 0 + %86 = load %Array*, %Array** %85, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %86, i32 1) + %87 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %84, i32 0, i32 1 + %88 = load %Array*, %Array** %87, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %88, i32 1) + %89 = bitcast { %Array*, %Array* }* %84 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %89, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %90 = add i64 %80, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 1) + %91 = call i64 @__quantum__rt__array_get_size_1d(%Array* %71) + %92 = sub i64 %91, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %93 = phi i64 [ 0, %exit__5 ], [ %103, %exiting__6 ] + %94 = icmp sle i64 %93, %92 + br i1 %94, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %95 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %71, i64 %93) + %96 = bitcast i8* %95 to { %Array*, %Array* }** + %97 = load { %Array*, %Array* }*, { %Array*, %Array* }** %96, align 8 + %98 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %97, i32 0, i32 0 + %99 = load %Array*, %Array** %98, 
align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %99, i32 1) + %100 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %97, i32 0, i32 1 + %101 = load %Array*, %Array** %100, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %101, i32 1) + %102 = bitcast { %Array*, %Array* }* %97 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %102, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %103 = add i64 %93, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %71, i32 1) + %104 = call i64 @__quantum__rt__array_get_size_1d(%Array* %73) + %105 = sub i64 %104, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %106 = phi i64 [ 0, %exit__6 ], [ %116, %exiting__7 ] + %107 = icmp sle i64 %106, %105 + br i1 %107, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 %106) + %109 = bitcast i8* %108 to { %Array*, %Array* }** + %110 = load { %Array*, %Array* }*, { %Array*, %Array* }** %109, align 8 + %111 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %110, i32 0, i32 0 + %112 = load %Array*, %Array** %111, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %112, i32 1) + %113 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %110, i32 0, i32 1 + %114 = load %Array*, %Array** %113, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %114, i32 1) + %115 = bitcast { %Array*, %Array* }* %110 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %115, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %116 = add i64 %106, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %73, i32 1) + %117 = call i64 @__quantum__rt__array_get_size_1d(%Array* %75) + %118 = sub i64 %117, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %119 = phi i64 [ 0, %exit__7 ], [ %129, %exiting__8 ] + %120 = icmp sle i64 %119, %118 + br i1 %120, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %75, i64 %119) + %122 = bitcast i8* %121 to { %Array*, %Array* }** + %123 = load { %Array*, %Array* }*, { %Array*, %Array* }** %122, align 8 + %124 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 0 + %125 = load %Array*, %Array** %124, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %125, i32 1) + %126 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 1 + %127 = load %Array*, %Array** %126, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %127, i32 1) + %128 = bitcast { %Array*, %Array* }* %123 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %128, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %129 = add i64 %119, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %75, i32 1) + %130 = bitcast { %Array*, %Array*, %Array*, %Array* }* %__Item2__ to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %130, i32 1) + %131 = call i64 @__quantum__rt__array_get_size_1d(%Array* %77) + %132 = sub i64 %131, 1 + br label %header__9 + 
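+; header__9 below walks the JordanWignerInputState entries in %77 (each a { { double, double }*, %Array* }
+; tuple of a complex coefficient and an index array) and bumps their reference counts, mirroring the
+; header__5..header__8 loops that did the same for the four term arrays of %__Item2__, before the packed
+; encoding-data tuple %63 is handed back to the caller.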
+header__9: ; preds = %exiting__9, %exit__8 + %133 = phi i64 [ 0, %exit__8 ], [ %144, %exiting__9 ] + %134 = icmp sle i64 %133, %132 + br i1 %134, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %135 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 %133) + %136 = bitcast i8* %135 to { { double, double }*, %Array* }** + %137 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %136, align 8 + %138 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %137, i32 0, i32 0 + %139 = load { double, double }*, { double, double }** %138, align 8 + %140 = bitcast { double, double }* %139 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %140, i32 1) + %141 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %137, i32 0, i32 1 + %142 = load %Array*, %Array** %141, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %142, i32 1) + %143 = bitcast { { double, double }*, %Array* }* %137 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %143, i32 1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %144 = add i64 %133, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_reference_count(%Array* %77, i32 1) + %145 = bitcast { i64, %Array* }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %145, i32 1) + %146 = sub i64 %3, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %147 = phi i64 [ 0, %exit__9 ], [ %157, %exiting__10 ] + %148 = icmp sle i64 %147, %146 + br i1 %148, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %149 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %147) + %150 = bitcast i8* %149 to { %Array*, %Array* }** + %151 = load { %Array*, %Array* }*, { %Array*, %Array* }** %150, align 8 + %152 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %151, i32 0, i32 0 + %153 = load %Array*, %Array** %152, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %153, i32 -1) + %154 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %151, i32 0, i32 1 + %155 = load %Array*, %Array** %154, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %155, i32 -1) + %156 = bitcast { %Array*, %Array* }* %151 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %156, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %157 = add i64 %147, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %158 = sub i64 %18, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %159 = phi i64 [ 0, %exit__10 ], [ %169, %exiting__11 ] + %160 = icmp sle i64 %159, %158 + br i1 %160, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %161 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %159) + %162 = bitcast i8* %161 to { %Array*, %Array* }** + %163 = load { %Array*, %Array* }*, { %Array*, %Array* }** %162, align 8 + %164 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %163, i32 0, i32 0 + %165 = load %Array*, %Array** %164, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %165, i32 -1) + %166 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %163, i32 0, i32 1 + %167 = 
load %Array*, %Array** %166, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %167, i32 -1) + %168 = bitcast { %Array*, %Array* }* %163 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %168, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %169 = add i64 %159, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %170 = sub i64 %33, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %171 = phi i64 [ 0, %exit__11 ], [ %181, %exiting__12 ] + %172 = icmp sle i64 %171, %170 + br i1 %172, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %173 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %171) + %174 = bitcast i8* %173 to { %Array*, %Array* }** + %175 = load { %Array*, %Array* }*, { %Array*, %Array* }** %174, align 8 + %176 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %175, i32 0, i32 0 + %177 = load %Array*, %Array** %176, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %177, i32 -1) + %178 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %175, i32 0, i32 1 + %179 = load %Array*, %Array** %178, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %179, i32 -1) + %180 = bitcast { %Array*, %Array* }* %175 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %180, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %181 = add i64 %171, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %182 = sub i64 %48, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %183 = phi i64 [ 0, %exit__12 ], [ %193, %exiting__13 ] + %184 = icmp sle i64 %183, %182 + br i1 %184, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %185 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %183) + %186 = bitcast i8* %185 to { %Array*, %Array* }** + %187 = load { %Array*, %Array* }*, { %Array*, %Array* }** %186, align 8 + %188 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %187, i32 0, i32 0 + %189 = load %Array*, %Array** %188, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %189, i32 -1) + %190 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %187, i32 0, i32 1 + %191 = load %Array*, %Array** %190, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %191, i32 -1) + %192 = bitcast { %Array*, %Array* }* %187 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %192, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %193 = add i64 %183, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + ret { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %63 +} + +define internal double @Microsoft__Quantum__Chemistry__JordanWigner__VQE__EstimateEnergy__body({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %jwHamiltonian, i64 %nSamples) { +entry: + %energy = alloca double, align 8 + %0 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, 
%Array* }*, double }* %jwHamiltonian, i32 0, i32 1 + %jwTerms = load { %Array*, %Array*, %Array*, %Array* }*, { %Array*, %Array*, %Array*, %Array* }** %0, align 8 + %1 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %jwTerms, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %2) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %5) + %8 = bitcast i8* %7 to { %Array*, %Array* }** + %9 = load { %Array*, %Array* }*, { %Array*, %Array* }** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array*, %Array* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %jwTerms, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call i64 @__quantum__rt__array_get_size_1d(%Array* %17) + %19 = sub i64 %18, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %20) + %23 = bitcast i8* %22 to { %Array*, %Array* }** + %24 = load { %Array*, %Array* }*, { %Array*, %Array* }** %23, align 8 + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { %Array*, %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %31 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %jwTerms, i32 0, i32 2 + %32 = load %Array*, %Array** %31, align 8 + %33 = call i64 @__quantum__rt__array_get_size_1d(%Array* %32) + %34 = sub i64 %33, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %45, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %35) + %38 = bitcast i8* %37 to { %Array*, %Array* }** + %39 = load { %Array*, %Array* }*, { %Array*, %Array* }** %38, align 8 + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 0 + %41 = load %Array*, %Array** %40, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1) + %42 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 1 + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + %44 = bitcast { %Array*, %Array* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %44, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %45 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %46 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %jwTerms, i32 0, i32 3 + %47 = load %Array*, %Array** %46, align 8 + %48 = call i64 @__quantum__rt__array_get_size_1d(%Array* %47) + %49 = sub i64 %48, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %50 = phi i64 [ 0, %exit__3 ], [ %60, %exiting__4 ] + %51 = icmp sle i64 %50, %49 + br i1 %51, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %50) + %53 = bitcast i8* %52 to { %Array*, %Array* }** + %54 = load { %Array*, %Array* }*, { %Array*, %Array* }** %53, align 8 + %55 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 0 + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + %57 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 1 + %58 = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 1) + %59 = bitcast { %Array*, %Array* }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %59, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %60 = add i64 %50, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + %61 = bitcast { %Array*, %Array*, %Array*, %Array* }* %jwTerms to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %62 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %jwHamiltonian, i32 0, i32 2 + %inputState = load { i64, %Array* }*, { i64, %Array* }** %62, align 8 + %63 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputState, i32 0, i32 1 + %64 = load %Array*, %Array** %63, align 8 + %65 = call i64 @__quantum__rt__array_get_size_1d(%Array* %64) + %66 = sub i64 %65, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %67 = phi i64 [ 0, %exit__4 ], [ %78, %exiting__5 ] + %68 = icmp sle i64 %67, %66 + br i1 %68, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %67) + %70 = bitcast i8* %69 to { { double, double }*, %Array* }** + %71 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %70, align 8 + %72 = getelementptr inbounds { { 
double, double }*, %Array* }, { { double, double }*, %Array* }* %71, i32 0, i32 0 + %73 = load { double, double }*, { double, double }** %72, align 8 + %74 = bitcast { double, double }* %73 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %74, i32 1) + %75 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %71, i32 0, i32 1 + %76 = load %Array*, %Array** %75, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 1) + %77 = bitcast { { double, double }*, %Array* }* %71 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %77, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %78 = add i64 %67, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + %79 = bitcast { i64, %Array* }* %inputState to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + %80 = bitcast { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %jwHamiltonian to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 1) + store double 0.000000e+00, double* %energy, align 8 + %81 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %jwHamiltonian, i32 0, i32 0 + %nQubits = load i64, i64* %81, align 4 + %82 = sub i64 %3, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %83 = phi i64 [ 0, %exit__5 ], [ %93, %exiting__6 ] + %84 = icmp sle i64 %83, %82 + br i1 %84, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %83) + %86 = bitcast i8* %85 to { %Array*, %Array* }** + %87 = load { %Array*, %Array* }*, { %Array*, %Array* }** %86, align 8 + %88 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %87, i32 0, i32 0 + %89 = load %Array*, %Array** %88, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %89, i32 1) + %90 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %87, i32 0, i32 1 + %91 = load %Array*, %Array** %90, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %91, i32 1) + %92 = bitcast { %Array*, %Array* }* %87 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %92, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %93 = add i64 %83, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %94 = sub i64 %18, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %95 = phi i64 [ 0, %exit__6 ], [ %105, %exiting__7 ] + %96 = icmp sle i64 %95, %94 + br i1 %96, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %95) + %98 = bitcast i8* %97 to { %Array*, %Array* }** + %99 = load { %Array*, %Array* }*, { %Array*, %Array* }** %98, align 8 + %100 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %99, i32 0, i32 0 + %101 = load %Array*, %Array** %100, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %101, i32 1) + %102 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %99, i32 0, i32 1 + %103 = load %Array*, %Array** %102, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %103, i32 1) + %104 = bitcast { %Array*, %Array* }* %99 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %104, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %105 = add i64 %95, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %106 = sub i64 %33, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %107 = phi i64 [ 0, %exit__7 ], [ %117, %exiting__8 ] + %108 = icmp sle i64 %107, %106 + br i1 %108, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %107) + %110 = bitcast i8* %109 to { %Array*, %Array* }** + %111 = load { %Array*, %Array* }*, { %Array*, %Array* }** %110, align 8 + %112 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %111, i32 0, i32 0 + %113 = load %Array*, %Array** %112, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %113, i32 1) + %114 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %111, i32 0, i32 1 + %115 = load %Array*, %Array** %114, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %115, i32 1) + %116 = bitcast { %Array*, %Array* }* %111 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %116, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %117 = add i64 %107, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %118 = sub i64 %48, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %119 = phi i64 [ 0, %exit__8 ], [ %129, %exiting__9 ] + %120 = icmp sle i64 %119, %118 + br i1 %120, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %119) + %122 = bitcast i8* %121 to { %Array*, %Array* }** + %123 = load { %Array*, %Array* }*, { %Array*, %Array* }** %122, align 8 + %124 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 0 + %125 = load %Array*, %Array** %124, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %125, i32 1) + %126 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 1 + %127 = load %Array*, %Array** %126, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %127, i32 1) + %128 = bitcast { %Array*, %Array* }* %123 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %128, i32 1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %129 = add i64 %119, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %130 = sub i64 %65, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %131 = phi i64 [ 0, %exit__9 ], [ %142, %exiting__10 ] + %132 = icmp sle i64 %131, %130 + br i1 %132, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %131) + %134 = bitcast i8* %133 to { { double, double }*, %Array* }** + %135 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %134, align 8 + %136 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* 
%135, i32 0, i32 0 + %137 = load { double, double }*, { double, double }** %136, align 8 + %138 = bitcast { double, double }* %137 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %138, i32 1) + %139 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %135, i32 0, i32 1 + %140 = load %Array*, %Array** %139, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %140, i32 1) + %141 = bitcast { { double, double }*, %Array* }* %135 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %141, i32 1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %142 = add i64 %131, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + %143 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %jwHamiltonian, i32 0, i32 3 + %energyOffset = load double, double* %143, align 8 + %144 = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerGeneratorSystem__body({ %Array*, %Array*, %Array*, %Array* }* %jwTerms) + %145 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %144, i32 0, i32 0 + %nTerms = load i64, i64* %145, align 4 + %146 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %144, i32 0, i32 1 + %indexFunction = load %Callable*, %Callable** %146, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %indexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %indexFunction, i32 1) + %147 = sub i64 %nTerms, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %idxTerm = phi i64 [ 0, %exit__10 ], [ %166, %exiting__11 ] + %148 = icmp sle i64 %idxTerm, %147 + br i1 %148, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %149 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %150 = bitcast %Tuple* %149 to { i64 }* + %151 = getelementptr inbounds { i64 }, { i64 }* %150, i32 0, i32 0 + store i64 %idxTerm, i64* %151, align 4 + %152 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %indexFunction, %Tuple* %149, %Tuple* %152) + %153 = bitcast %Tuple* %152 to { { { %Array*, %Array* }*, %Array* }* }* + %154 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %153, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %154, align 8 + %155 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %156 = load { %Array*, %Array* }*, { %Array*, %Array* }** %155, align 8 + %157 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %156, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %157, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %158 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %156, i32 0, i32 1 + %coeff = load %Array*, %Array** %158, align 8 
+ call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %159 = bitcast { %Array*, %Array* }* %156 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %159, i32 1) + %160 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %160, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %161 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %161, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %162 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxTermType, i64 0) + %163 = bitcast i8* %162 to i64* + %termType = load i64, i64* %163, align 4 + %ops = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner__VQE__MeasurementOperators__body(i64 %nQubits, %Array* %idxFermions, i64 %termType) + %164 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ops) + %165 = sub i64 %164, 1 + br label %header__12 + +exiting__11: ; preds = %exit__15 + %166 = add i64 %idxTerm, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + %167 = load double, double* %energy, align 8 + %168 = fadd double %167, %energyOffset + %169 = sub i64 %3, 1 + br label %header__16 + +header__12: ; preds = %exiting__12, %body__11 + %170 = phi i64 [ 0, %body__11 ], [ %175, %exiting__12 ] + %171 = icmp sle i64 %170, %165 + br i1 %171, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %172 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %170) + %173 = bitcast i8* %172 to %Array** + %174 = load %Array*, %Array** %173, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %174, i32 1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %175 = add i64 %170, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %coeffs = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner__VQE__ExpandedCoefficients__body(%Array* %coeff, i64 %termType) + call void @__quantum__rt__array_update_alias_count(%Array* %coeffs, i32 1) + %176 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %177 = load %Array*, %Array** %63, align 8 + %178 = call i64 @__quantum__rt__array_get_size_1d(%Array* %177) + %179 = sub i64 %178, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %180 = phi i64 [ 0, %exit__12 ], [ %191, %exiting__13 ] + %181 = icmp sle i64 %180, %179 + br i1 %181, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %182 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %177, i64 %180) + %183 = bitcast i8* %182 to { { double, double }*, %Array* }** + %184 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %183, align 8 + %185 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %184, i32 0, i32 0 + %186 = load { double, double }*, { double, double }** %185, align 8 + %187 = bitcast { double, double }* %186 to %Tuple* + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %187, i32 1) + %188 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %184, i32 0, i32 1 + %189 = load %Array*, %Array** %188, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %189, i32 1) + %190 = bitcast { { double, double }*, %Array* }* %184 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %190, i32 1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %191 = add i64 %180, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_reference_count(%Array* %177, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %79, i32 1) + %192 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { i64, %Array* }* }* getelementptr ({ %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* null, i32 1) to i64)) + %193 = bitcast %Tuple* %192 to { %Callable*, { i64, %Array* }* }* + %194 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %193, i32 0, i32 0 + %195 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %193, i32 0, i32 1 + store %Callable* %176, %Callable** %194, align 8 + store { i64, %Array* }* %inputState, { i64, %Array* }** %195, align 8 + %inputStateUnitary = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__41__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__26__FunctionTable, %Tuple* %192) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inputStateUnitary, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inputStateUnitary, i32 1) + %jwTermEnergy = call double @Microsoft__Quantum__Chemistry__JordanWigner__VQE__EstimateTermExpectation__body(%Callable* %inputStateUnitary, %Array* %ops, %Array* %coeffs, i64 %nQubits, i64 %nSamples) + %196 = load double, double* %energy, align 8 + %197 = fadd double %196, %jwTermEnergy + store double %197, double* %energy, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %159, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %161, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + %198 = sub i64 %164, 1 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %199 = phi i64 [ 0, %exit__13 ], [ %204, %exiting__14 ] + %200 = icmp sle i64 %199, %198 + br i1 %200, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %201 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %199) + %202 = bitcast i8* %201 to %Array** + %203 = load %Array*, %Array** %202, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %203, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %204 = add i64 %199, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %coeffs, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inputStateUnitary, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inputStateUnitary, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %149, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %159, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %161, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %152, i32 -1) + %205 = sub i64 %164, 1 + br label %header__15 + +header__15: ; preds = %exiting__15, %exit__14 + %206 = phi i64 [ 0, %exit__14 ], [ %211, %exiting__15 ] + %207 = icmp sle i64 %206, %205 + br i1 %207, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %208 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %206) + %209 = bitcast i8* %208 to %Array** + %210 = load %Array*, %Array** %209, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %210, i32 -1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %211 = add i64 %206, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coeffs, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %inputStateUnitary, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %inputStateUnitary, i32 -1) + br label %exiting__11 + +header__16: ; preds = %exiting__16, %exit__11 + %212 = phi i64 [ 0, %exit__11 ], [ %222, %exiting__16 ] + %213 = icmp sle i64 %212, %169 + br i1 %213, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %212) + %215 = bitcast i8* %214 to { %Array*, %Array* }** + %216 = load { %Array*, %Array* }*, { %Array*, %Array* }** %215, align 8 + %217 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %216, i32 0, i32 0 + %218 = load %Array*, %Array** %217, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %218, i32 -1) + %219 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %216, i32 0, i32 1 + %220 = load %Array*, %Array** %219, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %220, i32 -1) + %221 = bitcast { %Array*, %Array* }* %216 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %221, i32 -1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %222 = add i64 %212, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %223 = sub i64 %18, 1 + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %224 = phi i64 [ 0, %exit__16 ], [ %234, %exiting__17 ] + %225 = icmp sle i64 %224, %223 + br i1 %225, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %226 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %224) + %227 = bitcast i8* %226 to { %Array*, %Array* }** + %228 = load { %Array*, %Array* }*, { %Array*, %Array* }** %227, align 8 + 
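+; body__17, like the surrounding header__16 through header__25 loops, undoes the alias-count increments
+; taken on the Hamiltonian term arrays and the input state at function entry, after the accumulated
+; energy %168 (%energy plus %energyOffset) has been computed in exit__11.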
%229 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %228, i32 0, i32 0 + %230 = load %Array*, %Array** %229, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %230, i32 -1) + %231 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %228, i32 0, i32 1 + %232 = load %Array*, %Array** %231, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %232, i32 -1) + %233 = bitcast { %Array*, %Array* }* %228 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %233, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %234 = add i64 %224, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %235 = sub i64 %33, 1 + br label %header__18 + +header__18: ; preds = %exiting__18, %exit__17 + %236 = phi i64 [ 0, %exit__17 ], [ %246, %exiting__18 ] + %237 = icmp sle i64 %236, %235 + br i1 %237, label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %238 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %236) + %239 = bitcast i8* %238 to { %Array*, %Array* }** + %240 = load { %Array*, %Array* }*, { %Array*, %Array* }** %239, align 8 + %241 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %240, i32 0, i32 0 + %242 = load %Array*, %Array** %241, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %242, i32 -1) + %243 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %240, i32 0, i32 1 + %244 = load %Array*, %Array** %243, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %244, i32 -1) + %245 = bitcast { %Array*, %Array* }* %240 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %245, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = %body__18 + %246 = add i64 %236, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %247 = sub i64 %48, 1 + br label %header__19 + +header__19: ; preds = %exiting__19, %exit__18 + %248 = phi i64 [ 0, %exit__18 ], [ %258, %exiting__19 ] + %249 = icmp sle i64 %248, %247 + br i1 %249, label %body__19, label %exit__19 + +body__19: ; preds = %header__19 + %250 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %248) + %251 = bitcast i8* %250 to { %Array*, %Array* }** + %252 = load { %Array*, %Array* }*, { %Array*, %Array* }** %251, align 8 + %253 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %252, i32 0, i32 0 + %254 = load %Array*, %Array** %253, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %254, i32 -1) + %255 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %252, i32 0, i32 1 + %256 = load %Array*, %Array** %255, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %256, i32 -1) + %257 = bitcast { %Array*, %Array* }* %252 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %257, i32 -1) + br label %exiting__19 + +exiting__19: ; preds = %body__19 + %258 = add i64 %248, 1 + br label %header__19 + +exit__19: ; preds = %header__19 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %259 = load %Array*, %Array** %63, align 8 + %260 = call i64 @__quantum__rt__array_get_size_1d(%Array* %259) + %261 = sub i64 %260, 1 + br label %header__20 + +header__20: ; preds = 
%exiting__20, %exit__19 + %262 = phi i64 [ 0, %exit__19 ], [ %273, %exiting__20 ] + %263 = icmp sle i64 %262, %261 + br i1 %263, label %body__20, label %exit__20 + +body__20: ; preds = %header__20 + %264 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %259, i64 %262) + %265 = bitcast i8* %264 to { { double, double }*, %Array* }** + %266 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %265, align 8 + %267 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %266, i32 0, i32 0 + %268 = load { double, double }*, { double, double }** %267, align 8 + %269 = bitcast { double, double }* %268 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %269, i32 -1) + %270 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %266, i32 0, i32 1 + %271 = load %Array*, %Array** %270, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %271, i32 -1) + %272 = bitcast { { double, double }*, %Array* }* %266 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %272, i32 -1) + br label %exiting__20 + +exiting__20: ; preds = %body__20 + %273 = add i64 %262, 1 + br label %header__20 + +exit__20: ; preds = %header__20 + call void @__quantum__rt__array_update_alias_count(%Array* %259, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 -1) + %274 = sub i64 %3, 1 + br label %header__21 + +header__21: ; preds = %exiting__21, %exit__20 + %275 = phi i64 [ 0, %exit__20 ], [ %285, %exiting__21 ] + %276 = icmp sle i64 %275, %274 + br i1 %276, label %body__21, label %exit__21 + +body__21: ; preds = %header__21 + %277 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %275) + %278 = bitcast i8* %277 to { %Array*, %Array* }** + %279 = load { %Array*, %Array* }*, { %Array*, %Array* }** %278, align 8 + %280 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %279, i32 0, i32 0 + %281 = load %Array*, %Array** %280, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %281, i32 -1) + %282 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %279, i32 0, i32 1 + %283 = load %Array*, %Array** %282, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %283, i32 -1) + %284 = bitcast { %Array*, %Array* }* %279 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %284, i32 -1) + br label %exiting__21 + +exiting__21: ; preds = %body__21 + %285 = add i64 %275, 1 + br label %header__21 + +exit__21: ; preds = %header__21 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %286 = sub i64 %18, 1 + br label %header__22 + +header__22: ; preds = %exiting__22, %exit__21 + %287 = phi i64 [ 0, %exit__21 ], [ %297, %exiting__22 ] + %288 = icmp sle i64 %287, %286 + br i1 %288, label %body__22, label %exit__22 + +body__22: ; preds = %header__22 + %289 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %287) + %290 = bitcast i8* %289 to { %Array*, %Array* }** + %291 = load { %Array*, %Array* }*, { %Array*, %Array* }** %290, align 8 + %292 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %291, i32 0, i32 0 + %293 = load %Array*, %Array** %292, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %293, i32 -1) + %294 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %291, i32 0, i32 1 + %295 
= load %Array*, %Array** %294, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %295, i32 -1) + %296 = bitcast { %Array*, %Array* }* %291 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %296, i32 -1) + br label %exiting__22 + +exiting__22: ; preds = %body__22 + %297 = add i64 %287, 1 + br label %header__22 + +exit__22: ; preds = %header__22 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %298 = sub i64 %33, 1 + br label %header__23 + +header__23: ; preds = %exiting__23, %exit__22 + %299 = phi i64 [ 0, %exit__22 ], [ %309, %exiting__23 ] + %300 = icmp sle i64 %299, %298 + br i1 %300, label %body__23, label %exit__23 + +body__23: ; preds = %header__23 + %301 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %299) + %302 = bitcast i8* %301 to { %Array*, %Array* }** + %303 = load { %Array*, %Array* }*, { %Array*, %Array* }** %302, align 8 + %304 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %303, i32 0, i32 0 + %305 = load %Array*, %Array** %304, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %305, i32 -1) + %306 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %303, i32 0, i32 1 + %307 = load %Array*, %Array** %306, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %307, i32 -1) + %308 = bitcast { %Array*, %Array* }* %303 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %308, i32 -1) + br label %exiting__23 + +exiting__23: ; preds = %body__23 + %309 = add i64 %299, 1 + br label %header__23 + +exit__23: ; preds = %header__23 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %310 = sub i64 %48, 1 + br label %header__24 + +header__24: ; preds = %exiting__24, %exit__23 + %311 = phi i64 [ 0, %exit__23 ], [ %321, %exiting__24 ] + %312 = icmp sle i64 %311, %310 + br i1 %312, label %body__24, label %exit__24 + +body__24: ; preds = %header__24 + %313 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %311) + %314 = bitcast i8* %313 to { %Array*, %Array* }** + %315 = load { %Array*, %Array* }*, { %Array*, %Array* }** %314, align 8 + %316 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %315, i32 0, i32 0 + %317 = load %Array*, %Array** %316, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %317, i32 -1) + %318 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %315, i32 0, i32 1 + %319 = load %Array*, %Array** %318, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %319, i32 -1) + %320 = bitcast { %Array*, %Array* }* %315 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %320, i32 -1) + br label %exiting__24 + +exiting__24: ; preds = %body__24 + %321 = add i64 %311, 1 + br label %header__24 + +exit__24: ; preds = %header__24 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %322 = sub i64 %260, 1 + br label %header__25 + +header__25: ; preds = %exiting__25, %exit__24 + %323 = phi i64 [ 0, %exit__24 ], [ %334, %exiting__25 ] + %324 = icmp sle i64 %323, %322 + br i1 %324, label %body__25, label %exit__25 + +body__25: ; preds = %header__25 + %325 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %259, i64 %323) + %326 = bitcast i8* %325 to { { double, double }*, %Array* }** + %327 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %326, 
align 8 + %328 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %327, i32 0, i32 0 + %329 = load { double, double }*, { double, double }** %328, align 8 + %330 = bitcast { double, double }* %329 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %330, i32 -1) + %331 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %327, i32 0, i32 1 + %332 = load %Array*, %Array** %331, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %332, i32 -1) + %333 = bitcast { { double, double }*, %Array* }* %327 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %333, i32 -1) + br label %exiting__25 + +exiting__25: ; preds = %body__25 + %334 = add i64 %323, 1 + br label %header__25 + +exit__25: ; preds = %header__25 + call void @__quantum__rt__array_update_alias_count(%Array* %259, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %indexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %indexFunction, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %indexFunction, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %indexFunction, i32 -1) + %335 = bitcast { i64, %Callable* }* %144 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %335, i32 -1) + ret double %168 +} + +define internal double @Microsoft__Quantum__Characterization__EstimateFrequency__body(%Callable* %preparation, %Callable* %measurement, i64 %nQubits, i64 %nMeasurements) { +entry: + %nUp = alloca i64, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %measurement, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %measurement, i32 1) + store i64 0, i64* %nUp, align 4 + %0 = sub i64 %nMeasurements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxMeasurement = phi i64 [ 0, %entry ], [ %16, %exiting__1 ] + %1 = icmp sle i64 %idxMeasurement, %0 + br i1 %1, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %register = call %Array* @__quantum__rt__qubit_allocate_array(i64 %nQubits) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %3 = bitcast %Tuple* %2 to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + store %Array* %register, %Array** %4, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %preparation, %Tuple* %2, %Tuple* null) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Array* }* + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + store %Array* %register, %Array** %7, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Result* }* getelementptr ({ %Result* }, { %Result* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %measurement, %Tuple* %5, %Tuple* %8) + %9 = bitcast %Tuple* %8 to { %Result* }* + %10 = getelementptr 
inbounds { %Result* }, { %Result* }* %9, i32 0, i32 0 + %result = load %Result*, %Result** %10, align 8 + %11 = call %Result* @__quantum__rt__result_get_zero() + %12 = call i1 @__quantum__rt__result_equal(%Result* %result, %Result* %11) + br i1 %12, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %13 = load i64, i64* %nUp, align 4 + %14 = add i64 %13, 1 + store i64 %14, i64* %nUp, align 4 + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Reset__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @Microsoft__Quantum__Canon___8378d03f253249b0a8b7584c7ad801ff_ApplyToEach__body(%Callable* %15, %Array* %register) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %register) + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %16 = add i64 %idxMeasurement, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %17 = load i64, i64* %nUp, align 4 + %18 = sitofp i64 %17 to double + %19 = sitofp i64 %nMeasurements to double + %20 = fdiv double %18, %19 + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %measurement, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %measurement, i32 -1) + ret double %20 +} + +declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) + +declare %Result* @__quantum__rt__result_get_zero() + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +define internal void @Microsoft__Quantum__Canon___8378d03f253249b0a8b7584c7ad801ff_ApplyToEach__body(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call %Range @Microsoft__Quantum__Arrays___cf7bb862dc544cd083b9ebf7b65b7b76_IndexRange__body(%Array* %register) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %4 = icmp sgt i64 %2, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxQubit = phi i64 [ %1, %preheader__1 ], [ %14, %exiting__1 ] + %5 = icmp sle i64 %idxQubit, %3 + %6 
= icmp sge i64 %idxQubit, %3 + %7 = select i1 %4, i1 %5, i1 %6 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Qubit* }* + %13 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %12, i32 0, i32 0 + store %Qubit* %10, %Qubit** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %singleElementOperation, %Tuple* %11, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %14 = add i64 %idxQubit, %2 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Reset__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %2) + ret void +} + +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) + +declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) + +define internal void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %qubit) { +entry: + %0 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) + %1 = call %Result* @__quantum__rt__result_get_one() + %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %qubit) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +define internal double @Microsoft__Quantum__Characterization__EstimateFrequencyA__body(%Callable* %preparation, %Callable* %measurement, i64 %nQubits, i64 %nMeasurements) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %measurement, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %measurement, i32 1) + %0 = call double @Microsoft__Quantum__Characterization__EstimateFrequency__body(%Callable* %preparation, %Callable* %measurement, i64 %nQubits, i64 %nMeasurements) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %measurement, i32 -1) 
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %measurement, i32 -1) + ret double %0 +} + +define internal i1 @Microsoft__Quantum__Canon____QsRef0__AnyOutsideToleranceCP____body(double %tolerance, %Array* %coefficients) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %10) + %13 = bitcast i8* %12 to { double, double }** + %coefficient = load { double, double }*, { double, double }** %13, align 8 + %14 = bitcast { double, double }* %coefficient to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %coefficient) + %16 = fcmp ogt double %15, %tolerance + br i1 %16, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__2 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + %17 = sub i64 %0, 1 + br label %header__3 + +continue__1: ; preds = %body__2 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %continue__1 + %18 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %19 = sub i64 %0, 1 + br label %header__4 + +header__3: ; preds = %exiting__3, %then0__1 + %20 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__3 ] + %21 = icmp sle i64 %20, %17 + br i1 %21, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %20) + %23 = bitcast i8* %22 to { double, double }** + %24 = load { double, double }*, { double, double }** %23, align 8 + %25 = bitcast { double, double }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %26 = add i64 %20, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 true + +header__4: ; preds = %exiting__4, %exit__2 + %27 = phi i64 [ 0, %exit__2 ], [ %33, %exiting__4 ] + %28 = icmp sle i64 %27, %19 + br i1 %28, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %27) + %30 = bitcast i8* %29 to { double, double }** + %31 = load { double, double }*, { double, double }** %30, align 8 + %32 = bitcast { double, double }* 
%31 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %32, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %33 = add i64 %27, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 false +} + +define internal double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 0 + %2 = load double, double* %1, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %2 +} + +define internal i1 @Microsoft__Quantum__Canon____QsRef0__AnyOutsideToleranceD____body(double %tolerance, %Array* %coefficients) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to double* + %coefficient = load double, double* %5, align 8 + %6 = call double @Microsoft__Quantum__Math__AbsD__body(double %coefficient) + %7 = fcmp oge double %6, %tolerance + br i1 %7, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 true + +continue__1: ; preds = %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 false +} + +define internal double @Microsoft__Quantum__Math__AbsD__body(double %a) { +entry: + %0 = fcmp olt double %a, 0.000000e+00 + br i1 %0, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %1 = fneg double %a + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %2 = phi double [ %1, %condTrue__1 ], [ %a, %condFalse__1 ] + ret double %2 +} + +define internal { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef0__MultiplexZCoefficients____body(%Array* %coefficients) { +entry: + %coefficients1 = alloca %Array*, align 8 + %coefficients0 = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %newCoefficientsLength = sdiv i64 %0, 2 + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %newCoefficientsLength) + %2 = sub i64 %newCoefficientsLength, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %3) + %6 = bitcast i8* %5 to double* + store double 0.000000e+00, double* %6, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %3, 1 + br label 
%header__1 + +exit__1: ; preds = %header__1 + store %Array* %1, %Array** %coefficients0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %newCoefficientsLength) + %9 = sub i64 %newCoefficientsLength, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %14, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 %10) + %13 = bitcast i8* %12 to double* + store double 0.000000e+00, double* %13, align 8 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %14 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + store %Array* %8, %Array** %coefficients1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %15 = sub i64 %newCoefficientsLength, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxCoeff = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %16 = icmp sle i64 %idxCoeff, %15 + br i1 %16, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %17 = load %Array*, %Array** %coefficients0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %18 = call %Array* @__quantum__rt__array_copy(%Array* %17, i1 false) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %idxCoeff) + %20 = bitcast i8* %19 to double* + %21 = load double, double* %20, align 8 + %22 = add i64 %idxCoeff, %newCoefficientsLength + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %22) + %24 = bitcast i8* %23 to double* + %25 = load double, double* %24, align 8 + %26 = fadd double %21, %25 + %27 = fmul double 5.000000e-01, %26 + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %idxCoeff) + %29 = bitcast i8* %28 to double* + store double %27, double* %29, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + store %Array* %18, %Array** %coefficients0, align 8 + %30 = load %Array*, %Array** %coefficients1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %30, i32 -1) + %31 = call %Array* @__quantum__rt__array_copy(%Array* %30, i1 false) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %idxCoeff) + %33 = bitcast i8* %32 to double* + %34 = load double, double* %33, align 8 + %35 = add i64 %idxCoeff, %newCoefficientsLength + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %35) + %37 = bitcast i8* %36 to double* + %38 = load double, double* %37, align 8 + %39 = fsub double %34, %38 + %40 = fmul double 5.000000e-01, %39 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %31, i64 %idxCoeff) + %42 = bitcast i8* %41 to double* + %43 = load double, double* %42, align 8 + store double %40, double* %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %31, i32 1) + store %Array* %31, %Array** %coefficients1, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %30, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %idxCoeff, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %45 = load %Array*, %Array** %coefficients0, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %45, i32 1) + %46 = load %Array*, %Array** %coefficients1, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %46, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { %Array*, %Array* }* + %49 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %48, i32 0, i32 1 + store %Array* %45, %Array** %49, align 8 + store %Array* %46, %Array** %50, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %45, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %46, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %45, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %46, i32 -1) + ret { %Array*, %Array* }* %48 +} + +declare %Array* @__quantum__rt__array_copy(%Array*, i1) + +define internal double @Microsoft__Quantum__Canon____QsRef0__TrotterStepSize____body(i64 %order) { +entry: + %0 = sitofp i64 %order to double + %1 = fsub double %0, 1.000000e+00 + %2 = fdiv double 1.000000e+00, %1 + %3 = call double @Microsoft__Quantum__Math__PowD__body(double 4.000000e+00, double %2) + %4 = fsub double 4.000000e+00, %3 + %5 = fdiv double 1.000000e+00, %4 + ret double %5 +} + +define internal double @Microsoft__Quantum__Math__PowD__body(double %x, double %y) { +entry: + %0 = call double @llvm.pow.f64(double %x, double %y) + ret double %0 +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__body(i2 %pauli, %Qubit* %target) { +entry: + %0 = icmp eq i2 %pauli, 1 + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +test1__1: ; preds = %entry + %1 = icmp eq i2 %pauli, -1 + br i1 %1, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__qis__y__body(%Qubit* %target) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %2 = icmp eq i2 %pauli, -2 + br i1 %2, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__qis__z__body(%Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + ret void +} + +declare void @__quantum__qis__x__body(%Qubit*) + +declare void @__quantum__qis__y__body(%Qubit*) + +declare void @__quantum__qis__z__body(%Qubit*) + +define internal void @Microsoft__Quantum__Canon__ApplyP__adj(i2 %pauli, %Qubit* %target) { +entry: + %0 = icmp eq i2 %pauli, 1 + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +test1__1: ; preds = %entry + %1 = icmp eq i2 %pauli, -1 + br i1 %1, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__qis__y__body(%Qubit* %target) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %2 = icmp eq i2 %pauli, -2 + br i1 %2, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__qis__z__body(%Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + ret void +} + +define internal void 
@Microsoft__Quantum__Canon__ApplyP__ctl(%Array* %__controlQubits__, { i2, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = icmp eq i2 %pauli, 1 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %4 = icmp eq i2 %pauli, -1 + br i1 %4, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %5 = icmp eq i2 %pauli, -2 + br i1 %5, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) + +declare void @__quantum__qis__y__ctl(%Array*, %Qubit*) + +declare void @__quantum__qis__z__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Canon__ApplyP__ctladj(%Array* %__controlQubits__, { i2, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = icmp eq i2 %pauli, 1 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %4 = icmp eq i2 %pauli, -1 + br i1 %4, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %5 = icmp eq i2 %pauli, -2 + br i1 %5, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* 
%__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 %pauli, i1 %bitApply, %Array* %bits, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %nBits = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %0 = call %Array* @Microsoft__Quantum__Arrays___ad4d9934ae784786889d57b350a8a8f2_Zipped__body(%Array* %bits, %Array* %qubits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %3) + %6 = bitcast i8* %5 to { i1, %Qubit* }** + %7 = load { i1, %Qubit* }*, { i1, %Qubit* }** %6, align 8 + %8 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %7, i32 0, i32 0 + %bit = load i1, i1* %8, align 1 + %9 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %7, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %9, align 8 + %10 = icmp eq i1 %bit, %bitApply + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + call void @Microsoft__Quantum__Canon__ApplyP__body(i2 %pauli, %Qubit* %qubit) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %11 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %12 = sub i64 %1, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %13) + %16 = bitcast i8* %15 to { i1, %Qubit* }** + %17 = load { i1, %Qubit* }*, { i1, %Qubit* }** %16, align 8 + %18 = bitcast { i1, %Qubit* }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___ad4d9934ae784786889d57b350a8a8f2_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %2 = icmp slt i64 %0, %1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label 
%condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = icmp eq i64 %nElements, 0 + br i1 %3, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %4 + +continue__1: ; preds = %condContinue__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %6 = bitcast i8* %5 to i1* + %7 = load i1, i1* %6, align 1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, %Qubit* }* getelementptr ({ i1, %Qubit* }, { i1, %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i1, %Qubit* }* + %13 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %12, i32 0, i32 1 + store i1 %7, i1* %13, align 1 + store %Qubit* %10, %Qubit** %14, align 8 + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %16 = sub i64 %nElements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %17 = phi i64 [ 0, %continue__1 ], [ %21, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %17) + %20 = bitcast i8* %19 to { i1, %Qubit* }** + store { i1, %Qubit* }* %12, { i1, %Qubit* }** %20, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %15, %Array** %output, align 8 + %22 = sub i64 %nElements, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %23) + %26 = bitcast i8* %25 to { i1, %Qubit* }** + %27 = load { i1, %Qubit* }*, { i1, %Qubit* }** %26, align 8 + %28 = bitcast { i1, %Qubit* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %30 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %48, %exiting__3 ] + %31 = icmp sle i64 %idxElement, %30 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %32 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %35 = bitcast i8* %34 to i1* + %36 = load i1, i1* %35, align 1 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 
%idxElement) + %38 = bitcast i8* %37 to %Qubit** + %39 = load %Qubit*, %Qubit** %38, align 8 + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, %Qubit* }* getelementptr ({ i1, %Qubit* }, { i1, %Qubit* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i1, %Qubit* }* + %42 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %41, i32 0, i32 1 + store i1 %36, i1* %42, align 1 + store %Qubit* %39, %Qubit** %43, align 8 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxElement) + %45 = bitcast i8* %44 to { i1, %Qubit* }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %46 = load { i1, %Qubit* }*, { i1, %Qubit* }** %45, align 8 + %47 = bitcast { i1, %Qubit* }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { i1, %Qubit* }* %41, { i1, %Qubit* }** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { i1, %Qubit* }** + %56 = load { i1, %Qubit* }*, { i1, %Qubit* }** %55, align 8 + %57 = bitcast { i1, %Qubit* }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret %Array* %49 +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 %pauli, i1 %bitApply, %Array* %bits, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %__qsVar0__nBits__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %0 = call %Array* @Microsoft__Quantum__Arrays___ad4d9934ae784786889d57b350a8a8f2_Zipped__body(%Array* %bits, %Array* %qubits) + %1 = call %Array* @Microsoft__Quantum__Arrays___ad4d9934ae784786889d57b350a8a8f2_Zipped__body(%Array* %bits, %Array* %qubits) + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + %4 = insertvalue %Range zeroinitializer, i64 %3, 0 + %5 = insertvalue %Range %4, i64 -1, 1 + %6 = insertvalue %Range %5, i64 0, 2 + %7 = call %Array* @__quantum__rt__array_slice_1d(%Array* %0, %Range %6, i1 true) + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %7) + %9 = sub i64 %8, 
1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %10 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %7, i64 %10) + %13 = bitcast i8* %12 to { i1, %Qubit* }** + %14 = load { i1, %Qubit* }*, { i1, %Qubit* }** %13, align 8 + %15 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %14, i32 0, i32 0 + %__qsVar1__bit__ = load i1, i1* %15, align 1 + %16 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %14, i32 0, i32 1 + %__qsVar2__qubit__ = load %Qubit*, %Qubit** %16, align 8 + %17 = icmp eq i1 %__qsVar1__bit__, %bitApply + br i1 %17, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + call void @Microsoft__Quantum__Canon__ApplyP__adj(i2 %pauli, %Qubit* %__qsVar2__qubit__) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %18 = add i64 %10, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %19 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %20 = sub i64 %19, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %21) + %24 = bitcast i8* %23 to { i1, %Qubit* }** + %25 = load { i1, %Qubit* }*, { i1, %Qubit* }** %24, align 8 + %26 = bitcast { i1, %Qubit* }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + %28 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %29) + %32 = bitcast i8* %31 to { i1, %Qubit* }** + %33 = load { i1, %Qubit* }*, { i1, %Qubit* }** %32, align 8 + %34 = bitcast { i1, %Qubit* }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %7, i32 -1) + ret void +} + +declare %Array* @__quantum__rt__array_slice_1d(%Array*, %Range, i1) + +define internal void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__ctl(%Array* %__controlQubits__, { i2, i1, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 1 + %bitApply = load 
i1, i1* %2, align 1 + %3 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 2 + %bits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %4 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 3 + %qubits = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %nBits = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %5 = call %Array* @Microsoft__Quantum__Arrays___ad4d9934ae784786889d57b350a8a8f2_Zipped__body(%Array* %bits, %Array* %qubits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5) + %7 = sub i64 %6, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %8 = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] + %9 = icmp sle i64 %8, %7 + br i1 %9, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %5, i64 %8) + %11 = bitcast i8* %10 to { i1, %Qubit* }** + %12 = load { i1, %Qubit* }*, { i1, %Qubit* }** %11, align 8 + %13 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %12, i32 0, i32 0 + %bit = load i1, i1* %13, align 1 + %14 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %12, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %14, align 8 + %15 = icmp eq i1 %bit, %bitApply + br i1 %15, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, %Qubit* }* getelementptr ({ i2, %Qubit* }, { i2, %Qubit* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { i2, %Qubit* }* + %18 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %17, i32 0, i32 1 + store i2 %pauli, i2* %18, align 1 + store %Qubit* %qubit, %Qubit** %19, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__ctl(%Array* %__controlQubits__, { i2, %Qubit* }* %17) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %20 = add i64 %8, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %21 = sub i64 %6, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %22 = phi i64 [ 0, %exit__1 ], [ %28, %exiting__2 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %5, i64 %22) + %25 = bitcast i8* %24 to { i1, %Qubit* }** + %26 = load { i1, %Qubit* }*, { i1, %Qubit* }** %25, align 8 + %27 = bitcast { i1, %Qubit* }* %26 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %28 = add i64 %22, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__ctladj(%Array* %__controlQubits__, { i2, i1, %Array*, %Array* }* %0) { +entry: + 
call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 1 + %bitApply = load i1, i1* %2, align 1 + %3 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 2 + %bits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %4 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 3 + %qubits = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %__qsVar0__nBits__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %5 = call %Array* @Microsoft__Quantum__Arrays___ad4d9934ae784786889d57b350a8a8f2_Zipped__body(%Array* %bits, %Array* %qubits) + %6 = call %Array* @Microsoft__Quantum__Arrays___ad4d9934ae784786889d57b350a8a8f2_Zipped__body(%Array* %bits, %Array* %qubits) + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + %9 = insertvalue %Range zeroinitializer, i64 %8, 0 + %10 = insertvalue %Range %9, i64 -1, 1 + %11 = insertvalue %Range %10, i64 0, 2 + %12 = call %Array* @__quantum__rt__array_slice_1d(%Array* %5, %Range %11, i1 true) + %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %12) + %14 = sub i64 %13, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %15 = phi i64 [ 0, %entry ], [ %27, %exiting__1 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %15) + %18 = bitcast i8* %17 to { i1, %Qubit* }** + %19 = load { i1, %Qubit* }*, { i1, %Qubit* }** %18, align 8 + %20 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %19, i32 0, i32 0 + %__qsVar1__bit__ = load i1, i1* %20, align 1 + %21 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %19, i32 0, i32 1 + %__qsVar2__qubit__ = load %Qubit*, %Qubit** %21, align 8 + %22 = icmp eq i1 %__qsVar1__bit__, %bitApply + br i1 %22, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, %Qubit* }* getelementptr ({ i2, %Qubit* }, { i2, %Qubit* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { i2, %Qubit* }* + %25 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %24, i32 0, i32 1 + store i2 %pauli, i2* %25, align 1 + store %Qubit* %__qsVar2__qubit__, %Qubit** %26, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__ctladj(%Array* %__controlQubits__, { i2, %Qubit* }* %24) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %27 = add i64 %15, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %28 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5) + %29 = sub i64 %28, 1 + 
br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %30 = phi i64 [ 0, %exit__1 ], [ %36, %exiting__2 ] + %31 = icmp sle i64 %30, %29 + br i1 %31, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %5, i64 %30) + %33 = bitcast i8* %32 to { i1, %Qubit* }** + %34 = load { i1, %Qubit* }*, { i1, %Qubit* }** %33, align 8 + %35 = bitcast { i1, %Qubit* }* %34 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %36 = add i64 %30, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + %37 = sub i64 %7, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %38 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %38) + %41 = bitcast i8* %40 to { i1, %Qubit* }** + %42 = load { i1, %Qubit* }*, { i1, %Qubit* }** %41, align 8 + %43 = bitcast { i1, %Qubit* }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %38, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__body(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %qubits__1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + %1 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = call i1 @Microsoft__Quantum__Arrays___1d3ac85f29c5411cb0d85cee37bd798d_IsEmpty__body(%Array* %qubits__1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @0, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__fail(%String* %3) + unreachable + +continue__1: ; preds = %entry + %4 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1) + %5 = trunc i64 %4 to i32 + %6 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %5) + %7 = fptosi double %6 to i64 + %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %7, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1) + %8 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef0__MultiplexZCoefficients____body(%Array* %coefficientsPadded) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %coefficients0 = load %Array*, %Array** %9, 
align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + %coefficients1 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1) + %11 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %qubits__1) + %12 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %11) + %13 = call %Qubit* @Microsoft__Quantum__Arrays___27ad8d9739d14310a78018b7367de3ec_Tail__body(%Array* %qubits__1) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients1, { %Array* }* %12, %Qubit* %13) + %14 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded) + %15 = icmp eq i64 %14, 2 + br i1 %15, label %then0__2, label %else__1 + +then0__2: ; preds = %continue__1 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0) + %17 = bitcast i8* %16 to double* + %18 = load double, double* %17, align 8 + %19 = call double @Microsoft__Quantum__Math__AbsD__body(double %18) + %20 = fcmp ogt double %19, %tolerance + br i1 %20, label %then0__3, label %continue__3 + +then0__3: ; preds = %then0__2 + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %22 = bitcast i8* %21 to i2* + store i2 0, i2* %22, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0) + %24 = bitcast i8* %23 to double* + %25 = load double, double* %24, align 8 + %theta = fmul double 1.000000e+00, %25 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %theta, %Array* %qubits__1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %then0__2 + br label %continue__2 + +else__1: ; preds = %continue__1 + %26 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %qubits__1) + %27 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %26) + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__body(double %tolerance, %Array* %coefficients0, { %Array* }* %27) + %28 = getelementptr inbounds { %Array* }, { %Array* }* %27, i32 0, i32 0 + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %26, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1) + %30 = bitcast { %Array* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %continue__3 + %31 = getelementptr inbounds { %Array* }, { %Array* }* %12, i32 0, i32 0 + %32 = load %Array*, %Array** %31, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + 
call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1) + %33 = bitcast { %Array*, %Array* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + %34 = bitcast { %Array* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + ret void +} + +define internal i1 @Microsoft__Quantum__Arrays___1d3ac85f29c5411cb0d85cee37bd798d_IsEmpty__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = icmp eq i64 %0, 0 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret i1 %1 +} + +declare %String* @__quantum__rt__string_create(i8*) + +declare void @__quantum__rt__fail(%String*) + +define internal %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %nElementsTotal, double %defaultElement, %Array* %inputArray) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 1) + %nElementsInitial = call i64 @__quantum__rt__array_get_size_1d(%Array* %inputArray) + %nAbsElementsTotal = call i64 @Microsoft__Quantum__Math__AbsI__body(i64 %nElementsTotal) + %0 = icmp sge i64 %nAbsElementsTotal, %nElementsInitial + %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([71 x i8], [71 x i8]* @10, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %0, i1 true, %String* %1) + %nElementsPad = sub i64 %nAbsElementsTotal, %nElementsInitial + %padArray = call %Array* @Microsoft__Quantum__Arrays___8db1b1d8b63441b583b7338681e3b5b2_ConstantArray__body(i64 %nElementsPad, double %defaultElement) + call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 1) + %2 = icmp sge i64 %nElementsTotal, 0 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %3 = call %Array* @__quantum__rt__array_concatenate(%Array* %padArray, %Array* %inputArray) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + br label %condContinue__1 + +condFalse__1: ; preds = %entry + %4 = call %Array* @__quantum__rt__array_concatenate(%Array* %inputArray, %Array* %padArray) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %5 = phi %Array* [ %3, %condTrue__1 ], [ %4, %condFalse__1 ] + call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %padArray, i32 -1) + ret %Array* %5 +} + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare double @llvm.powi.f64.i32(double, i32) #0 + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients, { %Array* }* %control, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %4 = trunc i64 %3 to i32 + %5 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %4) + %6 = fptosi double %5 to i64 + %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %6, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1) + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded) + %8 = icmp eq i64 %7, 1 + br i1 %8, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 0) + %10 = bitcast i8* %9 to double* + %11 = load double, double* %10, align 8 + %12 = call double @Microsoft__Quantum__Math__AbsD__body(double %11) + %13 = fcmp ogt double %12, %tolerance + br i1 %13, label %then0__2, label %continue__2 + +then0__2: ; preds = %then0__1 + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %15 = bitcast i8* %14 to i2* + store i2 -2, i2* %15, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 0) + %17 = bitcast i8* %16 to double* + %theta = load double, double* %17, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %19 = bitcast i8* %18 to %Qubit** + store %Qubit* %target, %Qubit** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + br label %continue__2 + +continue__2: ; preds = %then0__2, %then0__1 + br label %continue__1 + +else__1: ; preds = %entry + %20 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef0__MultiplexZCoefficients____body(%Array* %coefficientsPadded) + %21 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 0 + %coefficients0 = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1) + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 1 + 
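+; (editor annotation, not compiler output) This else-branch is the recursive
+; case of the multiplexed-Z decomposition: __QsRef0__MultiplexZCoefficients__
+; splits the padded coefficient vector into two halves -- by the usual
+; multiplexor identity these are plausibly c0[i] = 0.5*(c[i] + c[i+N/2]) and
+; c1[i] = 0.5*(c[i] - c[i+N/2]), though that helper's body is not shown here --
+; after which c0 drives a recursion on Most(control) and c1 drives a second
+; recursion conjugated by CNOT(Tail(control), target), as the calls below show.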
%coefficients1 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1) + %23 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %1) + %24 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %23) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients0, { %Array* }* %24, %Qubit* %target) + %25 = call i1 @Microsoft__Quantum__Canon____QsRef0__AnyOutsideToleranceD____body(double %tolerance, %Array* %coefficients1) + br i1 %25, label %then0__3, label %continue__3 + +then0__3: ; preds = %else__1 + %26 = call %Qubit* @Microsoft__Quantum__Arrays___27ad8d9739d14310a78018b7367de3ec_Tail__body(%Array* %1) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %26, %Qubit* %target) + %27 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %1) + %28 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %27) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients1, { %Array* }* %28, %Qubit* %target) + %29 = call %Qubit* @Microsoft__Quantum__Arrays___27ad8d9739d14310a78018b7367de3ec_Tail__body(%Array* %1) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %29, %Qubit* %target) + %30 = getelementptr inbounds { %Array* }, { %Array* }* %28, i32 0, i32 0 + %31 = load %Array*, %Array** %30, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 -1) + %32 = bitcast { %Array* }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %else__1 + %33 = getelementptr inbounds { %Array* }, { %Array* }* %24, i32 0, i32 0 + %34 = load %Array*, %Array** %33, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1) + %35 = bitcast { %Array*, %Array* }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + %36 = bitcast { %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %continue__1 + +continue__1: ; preds = %continue__3, %continue__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1) + ret void +} + +define internal { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %__Item1__) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ 
%Array* }, { %Array* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Array* }* + %2 = getelementptr inbounds { %Array* }, { %Array* }* %1, i32 0, i32 0 + store %Array* %__Item1__, %Array** %2, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %__Item1__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__Item1__, i32 -1) + ret { %Array* }* %1 +} + +define internal %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 2 + %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2 + %3 = call %Array* @__quantum__rt__array_slice_1d(%Array* %array, %Range %2, i1 true) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + ret %Array* %3 +} + +define internal %Qubit* @Microsoft__Quantum__Arrays___27ad8d9739d14310a78018b7367de3ec_Tail__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = icmp sgt i64 %0, 0 + %2 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([39 x i8], [39 x i8]* @11, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %1, i1 true, %String* %2) + %3 = sub i64 %0, 1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %3) + %5 = bitcast i8* %4 to %Qubit** + %6 = load %Qubit*, %Qubit** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + ret %Qubit* %6 +} + +declare void @__quantum__qis__exp__body(%Array*, double, %Array*) + +define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__adj(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %qubits__1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + %1 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = call i1 @Microsoft__Quantum__Arrays___1d3ac85f29c5411cb0d85cee37bd798d_IsEmpty__body(%Array* %qubits__1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @0, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__fail(%String* %3) + unreachable + +continue__1: ; preds = %entry + %4 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1) + %5 = trunc i64 %4 to i32 + %6 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %5) + %7 = fptosi double %6 to i64 + %__qsVar0__coefficientsPadded__ = call %Array* 
@Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %7, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1) + %8 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef0__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %__qsVar1__coefficients0__ = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + %__qsVar2__coefficients1__ = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsPadded__) + %12 = icmp eq i64 %11, 2 + br i1 %12, label %then0__2, label %else__1 + +then0__2: ; preds = %continue__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0) + %14 = bitcast i8* %13 to double* + %15 = load double, double* %14, align 8 + %16 = call double @Microsoft__Quantum__Math__AbsD__body(double %15) + %17 = fcmp ogt double %16, %tolerance + br i1 %17, label %then0__3, label %continue__3 + +then0__3: ; preds = %then0__2 + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %19 = bitcast i8* %18 to i2* + store i2 0, i2* %19, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0) + %21 = bitcast i8* %20 to double* + %22 = load double, double* %21, align 8 + %theta = fmul double 1.000000e+00, %22 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %theta, %Array* %qubits__1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %then0__2 + br label %continue__2 + +else__1: ; preds = %continue__1 + %23 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %qubits__1) + %24 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %23) + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__adj(double %tolerance, %Array* %__qsVar1__coefficients0__, { %Array* }* %24) + %25 = getelementptr inbounds { %Array* }, { %Array* }* %24, i32 0, i32 0 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %26, i32 -1) + %27 = bitcast { %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %continue__3 + %28 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %qubits__1) + %29 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %28) + %30 = call 
%Qubit* @Microsoft__Quantum__Arrays___27ad8d9739d14310a78018b7367de3ec_Tail__body(%Array* %qubits__1) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar2__coefficients1__, { %Array* }* %29, %Qubit* %30) + %31 = getelementptr inbounds { %Array* }, { %Array* }* %29, i32 0, i32 0 + %32 = load %Array*, %Array** %31, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1) + %33 = bitcast { %Array*, %Array* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + %34 = bitcast { %Array* }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + ret void +} + +declare void @__quantum__qis__exp__adj(%Array*, double, %Array*) + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %coefficients, { %Array* }* %control, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %4 = trunc i64 %3 to i32 + %5 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %4) + %6 = fptosi double %5 to i64 + %__qsVar0__coefficientsPadded__ = call %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %6, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1) + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsPadded__) + %8 = icmp eq i64 %7, 1 + br i1 %8, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsPadded__, i64 0) + %10 = bitcast i8* %9 to double* + %11 = load double, double* %10, align 8 + %12 = call double @Microsoft__Quantum__Math__AbsD__body(double %11) + %13 = fcmp ogt double %12, %tolerance + br i1 %13, label %then0__2, label %continue__2 + +then0__2: ; preds = %then0__1 + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %15 = bitcast i8* %14 to i2* + store i2 -2, i2* %15, align 1 + call void 
@__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsPadded__, i64 0) + %17 = bitcast i8* %16 to double* + %theta = load double, double* %17, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %19 = bitcast i8* %18 to %Qubit** + store %Qubit* %target, %Qubit** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + br label %continue__2 + +continue__2: ; preds = %then0__2, %then0__1 + br label %continue__1 + +else__1: ; preds = %entry + %20 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef0__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__) + %21 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 0 + %__qsVar1__coefficients0__ = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1) + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 1 + %__qsVar2__coefficients1__ = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1) + %23 = call i1 @Microsoft__Quantum__Canon____QsRef0__AnyOutsideToleranceD____body(double %tolerance, %Array* %__qsVar2__coefficients1__) + br i1 %23, label %then0__3, label %continue__3 + +then0__3: ; preds = %else__1 + %24 = call %Qubit* @Microsoft__Quantum__Arrays___27ad8d9739d14310a78018b7367de3ec_Tail__body(%Array* %1) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %24, %Qubit* %target) + %25 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %1) + %26 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %25) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar2__coefficients1__, { %Array* }* %26, %Qubit* %target) + %27 = call %Qubit* @Microsoft__Quantum__Arrays___27ad8d9739d14310a78018b7367de3ec_Tail__body(%Array* %1) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %27, %Qubit* %target) + %28 = getelementptr inbounds { %Array* }, { %Array* }* %26, i32 0, i32 0 + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1) + %30 = bitcast { %Array* }* %26 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %else__1 + %31 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %1) + %32 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %31) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar1__coefficients0__, { %Array* }* %32, %Qubit* %target) + %33 = 
getelementptr inbounds { %Array* }, { %Array* }* %32, i32 0, i32 0 + %34 = load %Array*, %Array** %33, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1) + %35 = bitcast { %Array*, %Array* }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + %36 = bitcast { %Array* }* %32 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %continue__1 + +continue__1: ; preds = %continue__3, %continue__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %qubits__1 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + %5 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call i1 @Microsoft__Quantum__Arrays___1d3ac85f29c5411cb0d85cee37bd798d_IsEmpty__body(%Array* %qubits__1) + br i1 %6, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %7 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @0, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__fail(%String* %7) + unreachable + +continue__1: ; preds = %entry + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1) + %9 = trunc i64 %8 to i32 + %10 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %9) + %11 = fptosi double %10 to i64 + 
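+; (editor annotation) The powi/fptosi idiom just above computes (-2.0)^n and
+; casts it to i64 as the signed target length passed to Padded__body; Padded's
+; sign test (the icmp sge against 0 in its definition earlier in this file)
+; selects whether the zero padding is concatenated in front of or behind the
+; input array.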
%coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %11, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1) + %12 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef0__MultiplexZCoefficients____body(%Array* %coefficientsPadded) + %13 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 0 + %coefficients0 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1) + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 1 + %coefficients1 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 1) + %15 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %qubits__1) + %16 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %15) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + %17 = call %Qubit* @Microsoft__Quantum__Arrays___27ad8d9739d14310a78018b7367de3ec_Tail__body(%Array* %qubits__1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, { %Array* }*, %Qubit* }* + %20 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 3 + store double %tolerance, double* %20, align 8 + store %Array* %coefficients1, %Array** %21, align 8 + store { %Array* }* %16, { %Array* }** %22, align 8 + store %Qubit* %17, %Qubit** %23, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }*, %Qubit* }* %19) + %24 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded) + %25 = icmp eq i64 %24, 2 + br i1 %25, label %then0__2, label %else__1 + +then0__2: ; preds = %continue__1 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0) + %27 = bitcast i8* %26 to double* + %28 = load double, double* %27, align 8 + %29 = call double @Microsoft__Quantum__Math__AbsD__body(double %28) + %30 = fcmp ogt double %29, %tolerance + br i1 %30, label %then0__3, label %continue__3 + +then0__3: ; preds = %then0__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %32 = bitcast i8* %31 to i2* + store i2 0, i2* %32, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0) + 
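+; (editor annotation) QIR packs Pauli operators into i2 (PauliI=0, PauliX=1,
+; PauliZ=2, PauliY=3); the "store i2 0" above therefore writes PauliI, while
+; the "store i2 -2" in the MultiplexZ bodies is the two's-complement i2
+; spelling of PauliZ. Applying exp with a lone PauliI realizes the global
+; phase exp(i*theta), which becomes a genuine controlled phase in this
+; __ctl specialization.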
%34 = bitcast i8* %33 to double* + %35 = load double, double* %34, align 8 + %theta = fmul double 1.000000e+00, %35 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { %Array*, double, %Array* }* + %38 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %37, i32 0, i32 1 + %40 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %37, i32 0, i32 2 + store %Array* %paulis, %Array** %38, align 8 + store double %theta, double* %39, align 8 + store %Array* %qubits__1, %Array** %40, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %37) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %then0__2 + br label %continue__2 + +else__1: ; preds = %continue__1 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 1) + %41 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %qubits__1) + %42 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %41) + call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1) + %43 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %44 = bitcast %Tuple* %43 to { double, %Array*, { %Array* }* }* + %45 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %44, i32 0, i32 0 + %46 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %44, i32 0, i32 1 + %47 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %44, i32 0, i32 2 + store double %tolerance, double* %45, align 8 + store %Array* %coefficients0, %Array** %46, align 8 + store { %Array* }* %42, { %Array* }** %47, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %44) + %48 = getelementptr inbounds { %Array* }, { %Array* }* %42, i32 0, i32 0 + %49 = load %Array*, %Array** %48, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + %50 = bitcast { %Array* }* %42 to %Tuple* + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %50, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %continue__3 + %51 = getelementptr inbounds { %Array* }, { %Array* }* %16, i32 0, i32 0 + %52 = load %Array*, %Array** %51, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1) + %53 = bitcast { %Array*, %Array* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %52, i32 -1) + %54 = bitcast { %Array* }* %16 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %54, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl(%Array* %controlRegister, { double, %Array*, { %Array* }*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %control = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %target = load %Qubit*, %Qubit** %7, align 8 + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5) + %9 = add i64 %8, 1 + %10 = trunc i64 %9 to i32 + %11 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %10) + %12 = fptosi double %11 to i64 + %13 = trunc i64 %8 to i32 + %14 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %13) + %15 = fptosi double %14 to i64 + %16 = call %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %15, 
double 0.000000e+00, %Array* %coefficients) + %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %12, double 0.000000e+00, %Array* %16) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1) + %17 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef0__MultiplexZCoefficients____body(%Array* %coefficientsPadded) + %18 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 0 + %coefficients0 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1) + %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 1 + %coefficients1 = load %Array*, %Array** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients0, { %Array* }* %control, %Qubit* %target) + %20 = call i1 @Microsoft__Quantum__Canon____QsRef0__AnyOutsideToleranceD____body(double %tolerance, %Array* %coefficients1) + br i1 %20, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients1, { %Array* }* %control, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1) + %21 = bitcast { %Array*, %Array* }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + ret void +} + +declare void @__quantum__qis__exp__ctl(%Array*, { %Array*, double, %Array* }*) + +define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = 
getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %qubits__1 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + %5 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call i1 @Microsoft__Quantum__Arrays___1d3ac85f29c5411cb0d85cee37bd798d_IsEmpty__body(%Array* %qubits__1) + br i1 %6, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %7 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @0, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__fail(%String* %7) + unreachable + +continue__1: ; preds = %entry + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1) + %9 = trunc i64 %8 to i32 + %10 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %9) + %11 = fptosi double %10 to i64 + %__qsVar0__coefficientsPadded__ = call %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %11, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1) + %12 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef0__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__) + %13 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 0 + %__qsVar1__coefficients0__ = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1) + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 1 + %__qsVar2__coefficients1__ = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1) + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsPadded__) + %16 = icmp eq i64 %15, 2 + br i1 %16, label %then0__2, label %else__1 + +then0__2: ; preds = %continue__1 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0) + %18 = bitcast i8* %17 to double* + %19 = load double, double* %18, align 8 + %20 = call double @Microsoft__Quantum__Math__AbsD__body(double %19) + %21 = fcmp ogt double %20, %tolerance + br i1 %21, label %then0__3, label %continue__3 + +then0__3: ; preds = %then0__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %23 = bitcast i8* %22 to i2* + store i2 0, i2* %23, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, 
i32 1) + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0) + %25 = bitcast i8* %24 to double* + %26 = load double, double* %25, align 8 + %theta = fmul double 1.000000e+00, %26 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, double, %Array* }* + %29 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %28, i32 0, i32 1 + %31 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %28, i32 0, i32 2 + store %Array* %paulis, %Array** %29, align 8 + store double %theta, double* %30, align 8 + store %Array* %qubits__1, %Array** %31, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %28) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %then0__2 + br label %continue__2 + +else__1: ; preds = %continue__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 1) + %32 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %qubits__1) + %33 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %32) + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + %34 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %35 = bitcast %Tuple* %34 to { double, %Array*, { %Array* }* }* + %36 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %35, i32 0, i32 0 + %37 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %35, i32 0, i32 1 + %38 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %35, i32 0, i32 2 + store double %tolerance, double* %36, align 8 + store %Array* %__qsVar1__coefficients0__, %Array** %37, align 8 + store { %Array* }* %33, { %Array* }** %38, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %35) + %39 = getelementptr inbounds { %Array* }, { %Array* }* %33, i32 0, i32 0 + %40 = load %Array*, %Array** %39, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1) + 
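+; (editor annotation) The paired alias-count and reference-count updates
+; bracketing each call follow the QIR runtime convention: alias counts appear
+; to back Q#'s immutability checks on values shared with callables, while
+; reference counts govern heap lifetime; the compiler emits them
+; conservatively, so adjacent +1/-1 pairs elsewhere in this file are expected
+; rather than redundant.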
call void @__quantum__rt__array_update_reference_count(%Array* %40, i32 -1) + %41 = bitcast { %Array* }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %continue__3 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 1) + %42 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %qubits__1) + %43 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %42) + call void @__quantum__rt__array_update_reference_count(%Array* %42, i32 -1) + %44 = call %Qubit* @Microsoft__Quantum__Arrays___27ad8d9739d14310a78018b7367de3ec_Tail__body(%Array* %qubits__1) + %45 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %46 = bitcast %Tuple* %45 to { double, %Array*, { %Array* }*, %Qubit* }* + %47 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 0 + %48 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 1 + %49 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 2 + %50 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 3 + store double %tolerance, double* %47, align 8 + store %Array* %__qsVar2__coefficients1__, %Array** %48, align 8 + store { %Array* }* %43, { %Array* }** %49, align 8 + store %Qubit* %44, %Qubit** %50, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }*, %Qubit* }* %46) + %51 = getelementptr inbounds { %Array* }, { %Array* }* %43, i32 0, i32 0 + %52 = load %Array*, %Array** %51, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1) + %53 = bitcast { %Array*, %Array* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %52, i32 -1) + %54 = bitcast { %Array* }* %43 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %54, i32 -1) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + ret void +} + +declare void @__quantum__qis__exp__ctladj(%Array*, { %Array*, double, %Array* }*) + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj(%Array* %controlRegister, { double, %Array*, { %Array* }*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %control = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %target = load %Qubit*, %Qubit** %7, align 8 + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5) + %9 = add i64 %8, 1 + %10 = trunc i64 %9 to i32 + %11 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %10) + %12 = fptosi double %11 to i64 + %13 = trunc i64 %8 to i32 + %14 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %13) + %15 = fptosi double %14 to i64 + %16 = call %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %15, double 0.000000e+00, %Array* %coefficients) + %__qsVar0__coefficientsPadded__ = call %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %12, double 0.000000e+00, %Array* %16) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1) + %17 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef0__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__) + %18 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 0 + %__qsVar1__coefficients0__ = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1) + %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 1 + %__qsVar2__coefficients1__ = load %Array*, %Array** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1) + %20 = call i1 @Microsoft__Quantum__Canon____QsRef0__AnyOutsideToleranceD____body(double %tolerance, %Array* %__qsVar2__coefficients1__) + br i1 %20, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* 
%__qsVar2__coefficients1__, { %Array* }* %control, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar1__coefficients0__, { %Array* }* %control, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1) + %21 = bitcast { %Array*, %Array* }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body(double %tolerance, %Array* %coefficients, i2 %pauli, { %Array* }* %control, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = icmp eq i2 %pauli, -2 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, double, %Array*, { %Array* }* }* + %7 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* 
}* }* %6, i32 0, i32 2 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 3 + store %Callable* %4, %Callable** %7, align 8 + store double %tolerance, double* %8, align 8 + store %Array* %coefficients, %Array** %9, align 8 + store { %Array* }* %control, { %Array* }** %10, align 8 + %op = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Qubit* }* + %13 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %12, i32 0, i32 0 + store %Qubit* %target, %Qubit** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %11, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %14 = icmp eq i2 %pauli, 1 + br i1 %14, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %18 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 3 + %22 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 4 + store %Callable* %15, %Callable** %18, align 8 + store double %tolerance, double* %19, align 8 + store %Array* %coefficients, %Array** %20, align 8 + store i2 -2, i2* %21, align 1 + store { %Array* }* %control, { %Array* }** %22, align 8 + %op__1 = call %Callable* @__quantum__rt__callable_create([4 x void 
(%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %16) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 1) + %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__body(%Callable* %23, %Callable* %op__1, %Qubit* %target) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %24 = icmp eq i2 %pauli, -1 + br i1 %24, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %25 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %28 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 1 + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 2 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 3 + %32 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 4 + store %Callable* %25, %Callable** %28, align 8 + store double %tolerance, double* %29, align 8 + store %Array* %coefficients, %Array** %30, align 8 + store i2 1, i2* %31, align 1 + store { %Array* }* %control, { %Array* }** %32, align 8 + %op__2 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__3__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %26) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 1) + %33 = call %Callable* @__quantum__rt__callable_create([4 x void 
(%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %33) + call void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__body(%Callable* %33, %Callable* %op__2, %Qubit* %target) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %34 = icmp eq i2 %pauli, 0 + br i1 %34, label %then3__1, label %else__1 + +then3__1: ; preds = %test3__1 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__body(double %tolerance, %Array* %coefficients, { %Array* }* %control) + br label %continue__1 + +else__1: ; preds = %test3__1 + %35 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @1, i32 0, i32 0)) + %36 = icmp eq i2 1, %pauli + br i1 %36, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @2, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %38 = icmp eq i2 -1, %pauli + br i1 %38, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %39 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @3, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %40 = icmp eq i2 -2, %pauli + br i1 %40, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @4, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %42 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @5, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %43 = phi %String* [ %41, %condTrue__3 ], [ %42, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %44 = phi %String* [ %39, %condTrue__2 ], [ %43, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %45 = phi %String* [ %37, %condTrue__1 ], [ %44, %condContinue__2 ] + %46 = call %String* @__quantum__rt__string_concatenate(%String* %35, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + %47 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @6, i32 0, i32 0)) + %48 = call %String* @__quantum__rt__string_concatenate(%String* %46, %String* %47) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__fail(%String* %48) + unreachable + +continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { 
%Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, 
%Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* 
}* %0, i32 0, i32 3 + %5 = load double, double* %1, align 8 + %6 = load %Array*, %Array** %2, align 8 + %7 = load { %Array* }*, { %Array* }** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %5, %Array* %6, { %Array* }* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %5 = load double, double* %1, align 8 + %6 = load %Array*, %Array** %2, align 8 + %7 = load { %Array* }*, { %Array* }** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %5, %Array* %6, { %Array* }* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, { %Array* }*, %Qubit* }*, { double, %Array*, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl(%Array* %3, { double, %Array*, { %Array* }*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, { %Array* }*, %Qubit* }*, { double, %Array*, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj(%Array* %3, { double, %Array*, { %Array* }*, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = 
load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 
= load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { 
%Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, 
%Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, 
i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* 
%0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load %Array*, %Array** %2, align 8 + %8 = load i2, i2* %3, align 1 + %9 = load { %Array* }*, { %Array* }** %4, align 8 + %10 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body(double %6, %Array* %7, i2 %8, { %Array* }* %9, %Qubit* %10) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load %Array*, %Array** %2, align 8 + %8 = load i2, i2* %3, align 1 + %9 = load { %Array* }*, { %Array* }** %4, align 8 + %10 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj(double %6, %Array* %7, i2 %8, { %Array* }* %9, %Qubit* %10) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, i2, { %Array* }*, %Qubit* }*, { double, %Array*, i2, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl(%Array* %3, { double, %Array*, i2, { %Array* }*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, 
%Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, i2, { %Array* }*, %Qubit* }*, { double, %Array*, i2, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj(%Array* %3, { double, %Array*, i2, { %Array* }*, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 
%count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__body(%Callable* %outerOperation, %Callable* %innerOperation, %Qubit* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Qubit* }* + %2 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %1, i32 0, i32 0 + store %Qubit* %target, %Qubit** %2, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %outerOperation, %Tuple* %0, %Tuple* null) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Qubit* }* + %5 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %4, i32 0, i32 0 + store %Qubit* %target, %Qubit** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %innerOperation, %Tuple* %3, %Tuple* null) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Qubit* }* + %9 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %8, i32 0, i32 0 + store %Qubit* %target, %Qubit** %9, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %2) + ret void 
+} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Lifted__PartialApplication__3__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, 
align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** 
%24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, 
%Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, 
align 8 + call void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +declare void @__quantum__rt__callable_make_adjoint(%Callable*) + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) + +; QIR encodes Pauli in i2: PauliI = 0, PauliX = 1, PauliZ = 2, PauliY = 3; as signed i2 the last two print as -2 and -1, so the dispatch below tests Z, X, Y, then I. +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj(double %tolerance, %Array* %coefficients, i2 %pauli, { %Array* }* %control, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + ; i2 -2 == PauliZ: delegate directly to ApproximatelyMultiplexZ. + %3 = icmp eq i2 %pauli, -2 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, double, %Array*, { %Array* }* }* + %7 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 2 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 3 + store %Callable* %4, %Callable** %7, align 8 + store double %tolerance, double* %8, align 8 + store %Array* %coefficients, %Array** %9, align 8 + store { %Array* }* %control, { %Array* }** %10, align 8 + %__qsVar0__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__4__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %11) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Qubit* }* + %14 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %13, i32 0, i32 0 + store %Qubit* %target, %Qubit** %14, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11,
%Tuple* %12, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %15 = icmp eq i2 %pauli, 1 + br i1 %15, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %16 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 1 + %21 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 2 + %22 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 3 + %23 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 4 + store %Callable* %16, %Callable** %19, align 8 + store double %tolerance, double* %20, align 8 + store %Array* %coefficients, %Array** %21, align 8 + store i2 -2, i2* %22, align 1 + store { %Array* }* %control, { %Array* }** %23, align 8 + %__qsVar1__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__5__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %17) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + %24 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__adj(%Callable* %24, %Callable* %__qsVar1__op__, %Qubit* %target) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %25 = icmp eq i2 %pauli, -1 + br i1 %25, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %26 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 1 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 2 + %32 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 3 + %33 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 4 + store %Callable* %26, %Callable** %29, align 8 + store double %tolerance, double* %30, align 8 + store %Array* %coefficients, %Array** %31, align 8 + store i2 1, i2* %32, align 1 + store { %Array* }* %control, { %Array* }** %33, align 8 + %__qsVar2__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__6__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %27) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + %34 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %34) + call void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__adj(%Callable* %34, %Callable* %__qsVar2__op__, %Qubit* %target) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %34, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %35 = icmp eq i2 %pauli, 0 + br i1 %35, label %then3__1, label %else__1 + +then3__1: ; preds = %test3__1 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__adj(double %tolerance, %Array* %coefficients, { %Array* }* %control) + br label %continue__1 + +else__1: ; preds = %test3__1 + %36 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @1, i32 0, i32 0)) + %37 = icmp eq i2 1, %pauli + br i1 %37, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %38 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @2, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %39 = icmp eq i2 -1, %pauli + br i1 %39, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %40 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @3, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %41 = icmp eq i2 -2, %pauli + br i1 %41, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %42 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @4, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %43 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @5, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %44 = phi %String* [ %42, %condTrue__3 ], [ %43, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %45 = phi %String* [ %40, %condTrue__2 ], [ %44, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %46 = phi %String* [ %38, %condTrue__1 ], [ %45, %condContinue__2 ] + %47 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %46) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + %48 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @6, i32 0, i32 0)) + %49 = call %String* @__quantum__rt__string_concatenate(%String* %47, %String* %48) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %48, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__fail(%String* %49) + unreachable + +continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0) { +entry: + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %pauli = load i2, i2* %3, align 1 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %control = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %target = load %Qubit*, %Qubit** %8, align 8 + %9 = icmp eq i2 %pauli, -2 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Callable*, double, %Array*, { %Array* }* }* + %13 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 2 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 3 + store %Callable* %10, %Callable** %13, align 8 + store double %tolerance, double* %14, align 8 + store %Array* %coefficients, %Array** %15, align 8 + store { %Array* }* %control, { %Array* }** %16, align 8 + %op = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__7__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %11) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %17 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %17, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %17) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, %Qubit* }* + %20 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %20, align 8 + store %Qubit* %target, %Qubit** %21, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %18, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %22 = icmp eq i2 %pauli, 1 + br i1 %22, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %26 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 1 + %28 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 2 + %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 3 + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 4 + store %Callable* %23, %Callable** %26, align 8 + store double %tolerance, double* %27, align 8 + store %Array* %coefficients, %Array** %28, align 8 + store i2 -2, i2* %29, align 1 + store { %Array* }* %control, { %Array* }** %30, align 8 + %op__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* 
@PartialApplication__8__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %24) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 1) + %31 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { %Callable*, %Callable*, %Qubit* }* + %34 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 2 + store %Callable* %31, %Callable** %34, align 8 + store %Callable* %op__1, %Callable** %35, align 8 + store %Qubit* %target, %Qubit** %36, align 8 + call void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__ctl(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %33) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %37 = icmp eq i2 %pauli, -1 + br i1 %37, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %38 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %41 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 0 + %42 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { 
%Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 1 + %43 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 2 + %44 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 3 + %45 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 4 + store %Callable* %38, %Callable** %41, align 8 + store double %tolerance, double* %42, align 8 + store %Array* %coefficients, %Array** %43, align 8 + store i2 1, i2* %44, align 1 + store { %Array* }* %control, { %Array* }** %45, align 8 + %op__2 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__9__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %39) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 1) + %46 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %46) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { %Callable*, %Callable*, %Qubit* }* + %49 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 1 + %51 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 2 + store %Callable* %46, %Callable** %49, align 8 + store %Callable* %op__2, %Callable** %50, align 8 + store %Qubit* %target, %Qubit** %51, align 8 + call void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__ctl(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %48) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %52 = icmp eq i2 %pauli, 0 + br i1 %52, label %then3__1, label %else__1 + +then3__1: ; preds = %test3__1 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %53 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %54 = bitcast %Tuple* %53 to { double, %Array*, { %Array* }* }* + %55 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 0 + %56 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 1 + %57 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 2 + store double %tolerance, double* %55, align 8 + store %Array* %coefficients, %Array** %56, align 8 + store { %Array* }* %control, { %Array* }** %57, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %54) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1) + br label %continue__1 + +else__1: ; preds = %test3__1 + %58 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @1, i32 0, i32 0)) + %59 = icmp eq i2 1, %pauli + br i1 %59, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %60 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @2, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %61 = icmp eq i2 -1, %pauli + br i1 %61, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %62 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @3, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %63 = icmp eq i2 -2, %pauli + br i1 %63, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %64 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @4, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %65 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @5, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %66 = phi %String* [ %64, %condTrue__3 ], [ %65, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %67 = phi %String* [ %62, %condTrue__2 ], [ %66, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %68 = phi %String* [ %60, %condTrue__1 ], [ %67, %condContinue__2 ] + %69 = call %String* @__quantum__rt__string_concatenate(%String* %58, %String* %68) + call void @__quantum__rt__string_update_reference_count(%String* %58, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %68, i32 -1) + %70 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @6, i32 0, i32 0)) + %71 = call %String* @__quantum__rt__string_concatenate(%String* %69, %String* %70) + call void 
@__quantum__rt__string_update_reference_count(%String* %69, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %70, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__fail(%String* %71) + unreachable + +continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %pauli = load i2, i2* %3, align 1 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %control = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %target = load %Qubit*, %Qubit** %8, align 8 + %9 = icmp eq i2 %pauli, -2 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Callable*, double, %Array*, { %Array* }* }* + %13 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, 
i32 0, i32 0 + %14 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 2 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 3 + store %Callable* %10, %Callable** %13, align 8 + store double %tolerance, double* %14, align 8 + store %Array* %coefficients, %Array** %15, align 8 + store { %Array* }* %control, { %Array* }** %16, align 8 + %__qsVar0__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__10__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %11) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %17 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %17) + call void @__quantum__rt__callable_make_controlled(%Callable* %17) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, %Qubit* }* + %20 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %20, align 8 + store %Qubit* %target, %Qubit** %21, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %18, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %22 = icmp eq i2 %pauli, 1 + br i1 %22, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { 
%Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %26 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 1 + %28 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 2 + %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 3 + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 4 + store %Callable* %23, %Callable** %26, align 8 + store double %tolerance, double* %27, align 8 + store %Array* %coefficients, %Array** %28, align 8 + store i2 -2, i2* %29, align 1 + store { %Array* }* %control, { %Array* }** %30, align 8 + %__qsVar1__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__11__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %24) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + %31 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { %Callable*, %Callable*, %Qubit* }* + %34 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 2 + store %Callable* %31, %Callable** %34, align 8 + store %Callable* %__qsVar1__op__, %Callable** %35, align 8 + store %Qubit* %target, %Qubit** %36, align 8 + call void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %33) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, 
i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %37 = icmp eq i2 %pauli, -1 + br i1 %37, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %38 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %41 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 0 + %42 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 1 + %43 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 2 + %44 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 3 + %45 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 4 + store %Callable* %38, %Callable** %41, align 8 + store double %tolerance, double* %42, align 8 + store %Array* %coefficients, %Array** %43, align 8 + store i2 1, i2* %44, align 1 + store { %Array* }* %control, { %Array* }** %45, align 8 + %__qsVar2__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__12__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %39) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + %46 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %46) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { %Callable*, %Callable*, %Qubit* }* + %49 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 1 + %51 = getelementptr inbounds { 
%Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 2 + store %Callable* %46, %Callable** %49, align 8 + store %Callable* %__qsVar2__op__, %Callable** %50, align 8 + store %Qubit* %target, %Qubit** %51, align 8 + call void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %48) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %52 = icmp eq i2 %pauli, 0 + br i1 %52, label %then3__1, label %else__1 + +then3__1: ; preds = %test3__1 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %53 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %54 = bitcast %Tuple* %53 to { double, %Array*, { %Array* }* }* + %55 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 0 + %56 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 1 + %57 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 2 + store double %tolerance, double* %55, align 8 + store %Array* %coefficients, %Array** %56, align 8 + store { %Array* }* %control, { %Array* }** %57, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %54) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1) + br label %continue__1 + +else__1: ; preds = %test3__1 + %58 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @1, i32 0, i32 0)) + %59 = icmp eq i2 1, %pauli + br i1 %59, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %60 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @2, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %61 = icmp eq i2 -1, %pauli + br i1 %61, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %62 = call %String* @__quantum__rt__string_create(i8* getelementptr 
inbounds ([7 x i8], [7 x i8]* @3, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %63 = icmp eq i2 -2, %pauli + br i1 %63, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %64 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @4, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %65 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @5, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %66 = phi %String* [ %64, %condTrue__3 ], [ %65, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %67 = phi %String* [ %62, %condTrue__2 ], [ %66, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %68 = phi %String* [ %60, %condTrue__1 ], [ %67, %condContinue__2 ] + %69 = call %String* @__quantum__rt__string_concatenate(%String* %58, %String* %68) + call void @__quantum__rt__string_update_reference_count(%String* %58, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %68, i32 -1) + %70 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @6, i32 0, i32 0)) + %71 = call %String* @__quantum__rt__string_concatenate(%String* %69, %String* %70) + call void @__quantum__rt__string_update_reference_count(%String* %69, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %70, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__fail(%String* %71) + unreachable + +continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__h__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__h__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__s__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__s__adj(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__s__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__s__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) + +declare void @__quantum__rt__callable_make_controlled(%Callable*) + +define internal void @Lifted__PartialApplication__4__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, 
i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds 
{ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, 
align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__body__wrapper(%Tuple* 
%capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = 
getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr 
inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { 
%Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* 
%27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__adj(%Callable* %outerOperation, %Callable* %innerOperation, %Qubit* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %0 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %0) + call void @__quantum__rt__callable_make_adjoint(%Callable* %0) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Qubit* }* + %3 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %2, i32 0, i32 0 + store %Qubit* %target, %Qubit** %3, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %0, %Tuple* %1, %Tuple* null) + %4 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Qubit* }* + %7 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %6, i32 0, i32 0 + store %Qubit* %target, %Qubit** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %5, %Tuple* null) + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %8) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Qubit* }* + %11 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %10, i32 0, i32 0 + store %Qubit* %target, %Qubit** %11, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %9, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* 
%capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { 
%Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, 
%Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void 
+} + +define internal void @Lifted__PartialApplication__7__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, 
double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 
= bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__body__wrapper(%Tuple* %capture-tuple, %Tuple* 
%arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { 
%Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, 
double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 
0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void 
@__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__ctl(%Array* %controlRegister, { %Callable*, %Callable*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %outerOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %innerOperation = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Qubit* }* + %6 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %5, i32 0, i32 0 + store %Qubit* %target, %Qubit** %6, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %outerOperation, %Tuple* %4, %Tuple* null) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Qubit* }* + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Qubit* %target, %Qubit** %11, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %8, %Tuple* null) + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Qubit* }* + %15 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %14, i32 0, i32 0 + store %Qubit* %target, %Qubit** %15, align 8 + call void 
@__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %13, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, 
{ %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, 
i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + 
%21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { 
%Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + 
%0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void 
@__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* 
%capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { 
%Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, 
%Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__ctladj(%Array* %controlRegister, { %Callable*, %Callable*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %outerOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %innerOperation = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Qubit* }* + %7 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %6, i32 0, i32 0 + store %Qubit* %target, %Qubit** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %5, %Tuple* null) + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %8) + call void @__quantum__rt__callable_make_adjoint(%Callable* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* 
}, { %Array*, %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Array*, %Qubit* }* + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %10, i32 0, i32 1 + store %Array* %controlRegister, %Array** %11, align 8 + store %Qubit* %target, %Qubit** %12, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %9, %Tuple* null) + %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %13) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Qubit* }* + %16 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %15, i32 0, i32 0 + store %Qubit* %target, %Qubit** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %13, %Tuple* %14, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, 
align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, 
%Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = 
getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* 
getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { +entry: + %__controlQubits__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__controlQubits__, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, %Qubit** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal i1 @Microsoft__Quantum__Canon__IsRangeEmpty__body(%Range %rng) { +entry: + %0 = extractvalue %Range %rng, 0 + %1 = extractvalue %Range %rng, 1 + %2 = extractvalue %Range %rng, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %3 = icmp sgt i64 %1, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idx = phi i64 [ %0, %preheader__1 ], [ %7, %exiting__1 ] + %4 = icmp sle i64 %idx, %2 + %5 = icmp sge i64 %idx, %2 + %6 = select i1 %3, i1 %4, i1 %5 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + ret i1 false + +exiting__1: ; No predecessors! + %7 = add i64 %idx, %1 + br label %header__1 + +exit__1: ; preds = %header__1 + ret i1 true +} + +define internal %Callable* @Microsoft__Quantum__Canon__MultiplexerBruteForceFromGenerator__body(i64 %0, %Callable* %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %unitaryGenerator = bitcast %Tuple* %2 to { i64, %Callable* }* + %3 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %4 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + store i64 %0, i64* %3, align 4 + store %Callable* %1, %Callable** %4, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %5 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { i64, %Callable* }* }* getelementptr ({ %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Callable*, { i64, %Callable* }* }* + %8 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %7, i32 0, i32 1 + store %Callable* %5, %Callable** %8, align 8 + store { i64, %Callable* }* %unitaryGenerator, { i64, %Callable* }** %9, align 8 + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__13__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__3__FunctionTable, %Tuple* %6) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret %Callable* %10 +} + +define internal void @Lifted__PartialApplication__13__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { { %Array* }*, %Array* }* + %4 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 0 + %5 = load { %Array* }*, { %Array* }** %4, align 8 + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %10 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 2 + store { i64, %Callable* }* %2, { i64, %Callable* }** %10, align 8 + store { %Array* }* %5, { %Array* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__13__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { { %Array* }*, %Array* }* + %4 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 0 + %5 = load { %Array* }*, { %Array* }** %4, align 8 + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %10 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { 
{ i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 2 + store { i64, %Callable* }* %2, { i64, %Callable* }** %10, align 8 + store { %Array* }* %5, { %Array* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__13__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { %Array* }*, %Array* }*, { { %Array* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %6 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 1 + %7 = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8 + %8 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %9 = load { %Array* }*, { %Array* }** %8, align 8 + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %14 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 2 + store { i64, %Callable* }* %7, { i64, %Callable* }** %14, align 8 + store { %Array* }* %9, { %Array* }** %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to 
{ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, { { i64, %Callable* }*, { %Array* }*, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__13__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { %Array* }*, %Array* }*, { { %Array* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %6 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 1 + %7 = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8 + %8 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %9 = load { %Array* }*, { %Array* }** %8, align 8 + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %14 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 2 + store { i64, %Callable* }* %7, { i64, %Callable* }** %14, align 8 + store { %Array* }* %9, { %Array* }** %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, { { i64, %Callable* }*, { %Array* }*, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %23) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load { %Array* }*, { %Array* }** %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__body({ i64, %Callable* }* %4, { %Array* }* %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, 
{ { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load { %Array* }*, { %Array* }** %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__adj({ i64, %Callable* }* %4, { %Array* }* %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, { %Array* }*, %Array* }*, { { i64, %Callable* }*, { %Array* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__ctl(%Array* %3, { { i64, %Callable* }*, { %Array* }*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, { %Array* }*, %Array* }*, { { i64, %Callable* }*, { %Array* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__ctladj(%Array* %3, { { i64, %Callable* }*, { %Array* }*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__3__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %4, i32 0, i32 1 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + %7 
= bitcast { i64, %Callable* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__3__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %4, i32 0, i32 1 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + %7 = bitcast { i64, %Callable* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__body({ i64, %Callable* }* %unitaryGenerator, { %Array* }* %index, %Array* %target) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %unitaryFunction = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %1 = bitcast { i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %4 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %nIndex = call i64 @__quantum__rt__array_get_size_1d(%Array* %3) + %5 = trunc i64 %nIndex to i32 + %6 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %5) + %nStates = fptosi double %6 to i64 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %nUnitaries = load i64, i64* %7, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %8 = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %nStates, i64 %nUnitaries) + %9 = sub i64 %8, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxOp = phi i64 [ 0, %entry ], [ %24, %exiting__1 ] + %10 = icmp sle i64 %idxOp, %9 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 
ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i64 }* + %13 = getelementptr inbounds { i64 }, { i64 }* %12, i32 0, i32 0 + store i64 %idxOp, i64* %13, align 4 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %unitaryFunction, %Tuple* %11, %Tuple* %14) + %15 = bitcast %Tuple* %14 to { %Callable* }* + %16 = getelementptr inbounds { %Callable* }, { %Callable* }* %15, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @Microsoft__Quantum__Canon___85d57b1b6d144721a44197efbe3a0778_ControlledOnInt__body(i64 %idxOp, %Callable* %17) + %19 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, %Array* }* + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 1 + store %Array* %19, %Array** %22, align 8 + store %Array* %target, %Array** %23, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %20, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %24 = add i64 %idxOp, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + %25 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__adj({ i64, %Callable* }* %unitaryGenerator, { %Array* }* %index, %Array* %target) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %__qsVar3__unitaryFunction__ = load %Callable*, %Callable** %0, align 
8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + %1 = bitcast { i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %4 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %__qsVar0__nIndex__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %3) + %5 = trunc i64 %__qsVar0__nIndex__ to i32 + %6 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %5) + %__qsVar1__nStates__ = fptosi double %6 to i64 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %__qsVar2__nUnitaries__ = load i64, i64* %7, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + %8 = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %__qsVar1__nStates__, i64 %__qsVar2__nUnitaries__) + %9 = sub i64 %8, 1 + %10 = sub i64 %9, 0 + %11 = sdiv i64 %10, 1 + %12 = mul i64 1, %11 + %13 = add i64 0, %12 + %14 = insertvalue %Range zeroinitializer, i64 %13, 0 + %15 = insertvalue %Range %14, i64 -1, 1 + %16 = insertvalue %Range %15, i64 0, 2 + %17 = extractvalue %Range %16, 0 + %18 = extractvalue %Range %16, 1 + %19 = extractvalue %Range %16, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %20 = icmp sgt i64 %18, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar4__idxOp__ = phi i64 [ %17, %preheader__1 ], [ %38, %exiting__1 ] + %21 = icmp sle i64 %__qsVar4__idxOp__, %19 + %22 = icmp sge i64 %__qsVar4__idxOp__, %19 + %23 = select i1 %20, i1 %21, i1 %22 + br i1 %23, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { i64 }* + %26 = getelementptr inbounds { i64 }, { i64 }* %25, i32 0, i32 0 + store i64 %__qsVar4__idxOp__, i64* %26, align 4 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %__qsVar3__unitaryFunction__, %Tuple* %24, %Tuple* %27) + %28 = bitcast %Tuple* %27 to { %Callable* }* + %29 = getelementptr inbounds { %Callable* }, { %Callable* }* %28, i32 0, i32 0 + %30 = load %Callable*, %Callable** %29, align 8 + %31 = call %Callable* @Microsoft__Quantum__Canon___85d57b1b6d144721a44197efbe3a0778_ControlledOnInt__body(i64 %__qsVar4__idxOp__, %Callable* %30) + %32 = call %Callable* @__quantum__rt__callable_copy(%Callable* %31, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %32) + %33 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %33, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %34 = 
call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %35 = bitcast %Tuple* %34 to { %Array*, %Array* }* + %36 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %35, i32 0, i32 0 + %37 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %35, i32 0, i32 1 + store %Array* %33, %Array** %36, align 8 + store %Array* %target, %Array** %37, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %32, %Tuple* %34, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %33, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %38 = add i64 %__qsVar4__idxOp__, %18 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + %39 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %39, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %unitaryGenerator = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %unitaryFunction = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %3 = bitcast { i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, 
i32 0, i32 1 + %index = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %nIndex = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %9 = trunc i64 %nIndex to i32 + %10 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %9) + %nStates = fptosi double %10 to i64 + %11 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %nUnitaries = load i64, i64* %11, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %12 = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %nStates, i64 %nUnitaries) + %13 = sub i64 %12, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxOp = phi i64 [ 0, %entry ], [ %33, %exiting__1 ] + %14 = icmp sle i64 %idxOp, %13 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { i64 }* + %17 = getelementptr inbounds { i64 }, { i64 }* %16, i32 0, i32 0 + store i64 %idxOp, i64* %17, align 4 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %unitaryFunction, %Tuple* %15, %Tuple* %18) + %19 = bitcast %Tuple* %18 to { %Callable* }* + %20 = getelementptr inbounds { %Callable* }, { %Callable* }* %19, i32 0, i32 0 + %21 = load %Callable*, %Callable** %20, align 8 + %22 = call %Callable* @Microsoft__Quantum__Canon___85d57b1b6d144721a44197efbe3a0778_ControlledOnInt__body(i64 %idxOp, %Callable* %21) + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %24 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { %Array*, %Array* }* + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %26, i32 0, i32 0 + %28 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %26, i32 0, i32 1 + store %Array* %24, %Array** %27, align 8 + store %Array* %target, %Array** %28, align 8 + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, 
%Array* }* }* null, i32 1) to i64)) + %30 = bitcast %Tuple* %29 to { %Array*, { %Array*, %Array* }* }* + %31 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %30, i32 0, i32 0 + %32 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %30, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %31, align 8 + store { %Array*, %Array* }* %26, { %Array*, %Array* }** %32, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %29, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %33 = add i64 %idxOp, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + %34 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %unitaryGenerator = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %__qsVar3__unitaryFunction__ = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + %3 = bitcast { i64, %Callable* }* 
%unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %index = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %__qsVar0__nIndex__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %9 = trunc i64 %__qsVar0__nIndex__ to i32 + %10 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %9) + %__qsVar1__nStates__ = fptosi double %10 to i64 + %11 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %__qsVar2__nUnitaries__ = load i64, i64* %11, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + %12 = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %__qsVar1__nStates__, i64 %__qsVar2__nUnitaries__) + %13 = sub i64 %12, 1 + %14 = sub i64 %13, 0 + %15 = sdiv i64 %14, 1 + %16 = mul i64 1, %15 + %17 = add i64 0, %16 + %18 = insertvalue %Range zeroinitializer, i64 %17, 0 + %19 = insertvalue %Range %18, i64 -1, 1 + %20 = insertvalue %Range %19, i64 0, 2 + %21 = extractvalue %Range %20, 0 + %22 = extractvalue %Range %20, 1 + %23 = extractvalue %Range %20, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %24 = icmp sgt i64 %22, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar4__idxOp__ = phi i64 [ %21, %preheader__1 ], [ %46, %exiting__1 ] + %25 = icmp sle i64 %__qsVar4__idxOp__, %23 + %26 = icmp sge i64 %__qsVar4__idxOp__, %23 + %27 = select i1 %24, i1 %25, i1 %26 + br i1 %27, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { i64 }* + %30 = getelementptr inbounds { i64 }, { i64 }* %29, i32 0, i32 0 + store i64 %__qsVar4__idxOp__, i64* %30, align 4 + %31 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %__qsVar3__unitaryFunction__, %Tuple* %28, %Tuple* %31) + %32 = bitcast %Tuple* %31 to { %Callable* }* + %33 = getelementptr inbounds { %Callable* }, { %Callable* }* %32, i32 0, i32 0 + %34 = load %Callable*, %Callable** %33, align 8 + %35 = call %Callable* @Microsoft__Quantum__Canon___85d57b1b6d144721a44197efbe3a0778_ControlledOnInt__body(i64 %__qsVar4__idxOp__, %Callable* %34) + %36 = call %Callable* @__quantum__rt__callable_copy(%Callable* %35, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %36) + call void 
@__quantum__rt__callable_make_controlled(%Callable* %36) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %37 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %38 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %39 = bitcast %Tuple* %38 to { %Array*, %Array* }* + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 0 + %41 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 1 + store %Array* %37, %Array** %40, align 8 + store %Array* %target, %Array** %41, align 8 + %42 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %43 = bitcast %Tuple* %42 to { %Array*, { %Array*, %Array* }* }* + %44 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %43, i32 0, i32 0 + %45 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %43, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %44, align 8 + store { %Array*, %Array* }* %39, { %Array*, %Array* }** %45, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %36, %Tuple* %42, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %38, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %46 = add i64 %__qsVar4__idxOp__, %22 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + %47 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___79e0da793bac4e01ba7a8549000baf29_ControlledOnInt__body(i64 %numberState, %Callable* %oracle) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Callable* }* getelementptr ({ %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, i64, %Callable* }* + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store i64 %numberState, i64* %4, align 4 + store %Callable* %oracle, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__14__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__4__FunctionTable, %Tuple* %1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + ret %Callable* %6 +} + +define internal void @Lifted__PartialApplication__14__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 1 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Qubit* }* getelementptr ({ i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable*, %Array*, %Qubit* }* + %12 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { 
i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 3 + store i64 %2, i64* %12, align 4 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__14__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 1 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Qubit* }* getelementptr ({ i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable*, %Array*, %Qubit* }* + %12 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 3 + store i64 %2, i64* %12, align 4 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__14__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, 
{ %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Qubit* }*, { %Array*, %Qubit* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Qubit* }* getelementptr ({ i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64, %Callable*, %Array*, %Qubit* }* + %16 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 3 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Qubit* %13, %Qubit** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* getelementptr ({ %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* + %22 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { i64, %Callable*, %Array*, %Qubit* }* %15, { i64, %Callable*, %Array*, %Qubit* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__14__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, 
%Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Qubit* }*, { %Array*, %Qubit* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Qubit* }* getelementptr ({ i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64, %Callable*, %Array*, %Qubit* }* + %16 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 3 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Qubit* %13, %Qubit** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* getelementptr ({ %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* + %22 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { i64, %Callable*, %Array*, %Qubit* }* %15, { i64, %Callable*, %Array*, %Qubit* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable*, %Array*, %Qubit* }* + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__body(i64 %5, %Callable* %6, %Array* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable*, %Array*, %Qubit* }* + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__adj(i64 %5, %Callable* %6, %Array* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, %Callable*, %Array*, %Qubit* }*, { i64, %Callable*, %Array*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__ctl(%Array* %3, { i64, %Callable*, %Array*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { 
+entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, %Callable*, %Array*, %Qubit* }*, { i64, %Callable*, %Array*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__ctladj(%Array* %3, { i64, %Callable*, %Array*, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__4__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__4__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__body(i64 %numberState, %Callable* %oracle, %Array* %controlRegister, %Qubit* %targetRegister) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %bits = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %0) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %1 = call %Callable* @Microsoft__Quantum__Canon___883eb98596ed49bcbde64b1b9d9f4b25_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) + 
call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %3 = bitcast %Tuple* %2 to { %Array*, %Qubit* }* + %4 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %3, i32 0, i32 0 + %5 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %3, i32 0, i32 1 + store %Array* %controlRegister, %Array** %4, align 8 + store %Qubit* %targetRegister, %Qubit** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %1, %Tuple* %2, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__adj(i64 %numberState, %Callable* %oracle, %Array* %controlRegister, %Qubit* %targetRegister) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %__qsVar0__bits__ = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %0) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 1) + %1 = call %Callable* @Microsoft__Quantum__Canon___883eb98596ed49bcbde64b1b9d9f4b25_ControlledOnBitString__body(%Array* %__qsVar0__bits__, %Callable* %oracle) + %2 = call %Callable* @__quantum__rt__callable_copy(%Callable* %1, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %2) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, %Qubit* }* + %5 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + store %Array* %controlRegister, %Array** %5, align 8 + store %Qubit* %targetRegister, %Qubit** %6, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %2, %Tuple* %3, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__ctl(%Array* %__controlQubits__, { i64, %Callable*, %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %numberState = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %targetRegister = load %Qubit*, %Qubit** %4, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %bits = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %5) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %6 = call %Callable* @Microsoft__Quantum__Canon___883eb98596ed49bcbde64b1b9d9f4b25_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %6, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Qubit* }* + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Qubit* %targetRegister, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Qubit* }* }* + %14 = getelementptr inbounds { 
%Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %14, align 8 + store { %Array*, %Qubit* }* %9, { %Array*, %Qubit* }** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__ctladj(%Array* %__controlQubits__, { i64, %Callable*, %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %numberState = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %targetRegister = load %Qubit*, %Qubit** %4, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %__qsVar0__bits__ = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 1) + %6 = call %Callable* @Microsoft__Quantum__Canon___883eb98596ed49bcbde64b1b9d9f4b25_ControlledOnBitString__body(%Array* %__qsVar0__bits__, %Callable* %oracle) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %6, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %7) + call void 
@__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Qubit* }* + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Qubit* %targetRegister, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %14, align 8 + store { %Array*, %Qubit* }* %9, { %Array*, %Qubit* }** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___85d57b1b6d144721a44197efbe3a0778_ControlledOnInt__body(i64 %numberState, %Callable* %oracle) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Callable* }* getelementptr ({ 
%Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, i64, %Callable* }* + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store i64 %numberState, i64* %4, align 4 + store %Callable* %oracle, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__15__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__5__FunctionTable, %Tuple* %1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + ret %Callable* %6 +} + +define internal void @Lifted__PartialApplication__15__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 1 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Array* }* getelementptr ({ i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable*, %Array*, %Array* }* + %12 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 3 + store i64 %2, i64* %12, align 4 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__15__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, 
i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 1 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Array* }* getelementptr ({ i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable*, %Array*, %Array* }* + %12 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 3 + store i64 %2, i64* %12, align 4 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__15__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Array* }* getelementptr 
({ i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64, %Callable*, %Array*, %Array* }* + %16 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 3 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Callable*, %Array*, %Array* }* }* getelementptr ({ %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { i64, %Callable*, %Array*, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { i64, %Callable*, %Array*, %Array* }* %15, { i64, %Callable*, %Array*, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__15__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** 
%10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Array* }* getelementptr ({ i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64, %Callable*, %Array*, %Array* }* + %16 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 3 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Callable*, %Array*, %Array* }* }* getelementptr ({ %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { i64, %Callable*, %Array*, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { i64, %Callable*, %Array*, %Array* }* %15, { i64, %Callable*, %Array*, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, 
%Array* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__body(i64 %5, %Callable* %6, %Array* %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__adj(i64 %5, %Callable* %6, %Array* %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, %Callable*, %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, %Callable*, %Array*, %Array* }*, { i64, %Callable*, %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__ctl(%Array* %3, { i64, %Callable*, %Array*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, %Callable*, %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, %Callable*, %Array*, %Array* }*, { i64, %Callable*, %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__ctladj(%Array* %3, { i64, %Callable*, %Array*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__5__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, 
%Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__5__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__body(i64 %numberState, %Callable* %oracle, %Array* %controlRegister, %Array* %targetRegister) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %bits = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %0) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %1 = call %Callable* @Microsoft__Quantum__Canon___62ea9479a8404884bdf32c0866eaa1a0_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %3 = bitcast %Tuple* %2 to { %Array*, %Array* }* + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + store %Array* %controlRegister, %Array** %4, align 8 + store %Array* %targetRegister, %Array** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %1, %Tuple* %2, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__adj(i64 %numberState, %Callable* %oracle, %Array* %controlRegister, %Array* %targetRegister) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %__qsVar0__bits__ = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %0) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 1) + %1 = call %Callable* @Microsoft__Quantum__Canon___62ea9479a8404884bdf32c0866eaa1a0_ControlledOnBitString__body(%Array* %__qsVar0__bits__, %Callable* %oracle) + %2 = call %Callable* @__quantum__rt__callable_copy(%Callable* %1, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %2) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, %Array* }* + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + store %Array* %controlRegister, %Array** %5, align 8 + store %Array* %targetRegister, %Array** %6, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %2, %Tuple* %3, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__ctl(%Array* %__controlQubits__, { i64, %Callable*, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %numberState = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %targetRegister = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %bits = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %5) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %6 = call %Callable* @Microsoft__Quantum__Canon___62ea9479a8404884bdf32c0866eaa1a0_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %6, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Array* %targetRegister, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 
0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__ctladj(%Array* %__controlQubits__, { i64, %Callable*, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %numberState = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %targetRegister = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %__qsVar0__bits__ = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 1) + %6 = call %Callable* @Microsoft__Quantum__Canon___62ea9479a8404884bdf32c0866eaa1a0_ControlledOnBitString__body(%Array* %__qsVar0__bits__, %Callable* %oracle) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %6, i1 false) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %7) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Array* %targetRegister, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____body(%Array* %operations, %Array* %target) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: 
; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %8 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %9 = phi i64 [ 0, %exit__1 ], [ %16, %exiting__2 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %9) + %12 = bitcast i8* %11 to %Callable** + %op = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Array* }* + %15 = getelementptr inbounds { %Array* }, { %Array* }* %14, i32 0, i32 0 + store %Array* %target, %Array** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %13, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %16 = add i64 %9, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %17 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %18 = phi i64 [ 0, %exit__2 ], [ %23, %exiting__3 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %18) + %21 = bitcast i8* %20 to %Callable** + %22 = load %Callable*, %Callable** %21, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %22, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %23 = add i64 %18, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____adj(%Array* %operations, %Array* %target) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %8 = sub i64 %0, 1 + %9 = insertvalue %Range zeroinitializer, i64 %8, 0 + %10 = insertvalue %Range %9, i64 -1, 1 + %11 = insertvalue %Range %10, i64 0, 2 + %12 = call %Array* @__quantum__rt__array_slice_1d(%Array* %operations, %Range %11, i1 true) + %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %12) + %14 = sub i64 %13, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %23, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %15) + %18 = bitcast i8* %17 to %Callable** + %__qsVar0__op__ = load %Callable*, %Callable** %18, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %19 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %19) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array* }* + %22 = getelementptr inbounds { %Array* }, { %Array* }* %21, i32 0, i32 0 + store %Array* %target, %Array** %22, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %19, %Tuple* %20, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %23 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %24 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %25 = phi i64 [ 0, %exit__2 ], [ %30, %exiting__3 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %25) + %28 = bitcast i8* %27 to %Callable** + %29 = load %Callable*, %Callable** %28, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %29, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %30 = add i64 %25, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + ret void +} + +define internal 
void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____ctl(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %operations = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %4) + %7 = bitcast i8* %6 to %Callable** + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %11 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %12) + %15 = bitcast i8* %14 to %Callable** + %op = load %Callable*, %Callable** %15, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %16 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %16, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %16) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, %Array* }* + %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %18, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %19, align 8 + store %Array* %target, %Array** %20, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %16, %Tuple* %17, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %16, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %22 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %23 = phi i64 [ 0, %exit__2 ], [ %28, %exiting__3 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %23) + %26 = bitcast i8* %25 to %Callable** + %27 = load %Callable*, %Callable** %26, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %27, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %28 = add i64 %23, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____ctladj(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %operations = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %4) + %7 = bitcast i8* %6 to %Callable** + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %11 = sub i64 %2, 1 + %12 = insertvalue %Range zeroinitializer, i64 %11, 0 + %13 = insertvalue %Range %12, i64 -1, 1 + %14 = insertvalue %Range %13, i64 0, 2 + %15 = call %Array* @__quantum__rt__array_slice_1d(%Array* %operations, %Range %14, i1 true) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %15) + %17 = sub i64 %16, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %18 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %18) + %21 = bitcast i8* %20 to %Callable** + %__qsVar0__op__ = load %Callable*, %Callable** %21, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, %Array* }* + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %25, align 8 + store %Array* %target, %Array** %26, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %23, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %18, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %28 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %34, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %29) + %32 = bitcast i8* %31 to %Callable** + %33 = load %Callable*, %Callable** %32, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %33, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %34 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__body(%Array* %input) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__adj(%Array* %input) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 -1) + ret void +} + +define 
internal void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__ctl(%Array* %__controlQubits__, %Array* %input) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__ctladj(%Array* %__controlQubits__, %Array* %input) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 -1) + ret void +} + +define internal i64 @Microsoft__Quantum__Math__MinI__body(i64 %a, i64 %b) { +entry: + %0 = icmp slt i64 %a, %b + %1 = select i1 %0, i64 %a, i64 %b + ret i64 %1 +} + +define internal %Range @Microsoft__Quantum__Arrays___cf7bb862dc544cd083b9ebf7b65b7b76_IndexRange__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %2 +} + +define internal void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____body(i64 %order, { i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = icmp sgt i64 %order, 2 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %stepSizeOuter = call double @Microsoft__Quantum__Canon____QsRef0__TrotterStepSize____body(i64 %order) + %4 = fmul double 4.000000e+00, %stepSizeOuter + %stepSizeInner = fsub double 1.000000e+00, %4 + %5 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { i64, %Callable* }* + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %7, i32 0, i32 1 + store i64 %nSteps, i64* %8, align 4 + store %Callable* %op, %Callable** %9, align 8 + %10 = fmul double %stepSizeOuter, %stepSize + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____body(i64 %5, { i64, %Callable* }* %7, double %10, %Array* %target) + %11 = sub i64 %order, 2 + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, %Callable* }* + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1 + store i64 %nSteps, i64* %14, align 4 + store %Callable* %op, %Callable** %15, align 8 + %16 = fmul double %stepSizeOuter, %stepSize + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____body(i64 %11, { i64, %Callable* }* %13, double %16, %Array* %target) + %17 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { i64, %Callable* }* + %20 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %19, i32 0, i32 1 + store i64 %nSteps, i64* %20, align 4 + store %Callable* %op, %Callable** %21, align 8 + %22 = fmul double %stepSizeInner, %stepSize + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____body(i64 %17, { i64, %Callable* }* %19, double %22, %Array* %target) + %23 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { i64, %Callable* }* + %26 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %25, i32 0, i32 1 + store i64 %nSteps, i64* %26, align 4 + store %Callable* %op, %Callable** %27, align 8 + %28 = fmul double %stepSizeOuter, %stepSize + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____body(i64 %23, { i64, %Callable* }* %25, double %28, %Array* %target) + %29 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %31 = bitcast %Tuple* %30 to { i64, %Callable* }* + %32 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %31, i32 0, i32 0 + %33 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %31, i32 0, i32 1 + store i64 %nSteps, i64* %32, align 4 + store %Callable* %op, %Callable** %33, align 8 + %34 = fmul double %stepSizeOuter, %stepSize + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____body(i64 %29, { i64, %Callable* }* %31, double %34, 
%Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %35 = icmp eq i64 %order, 2 + br i1 %35, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { i64, %Callable* }* + %38 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %37, i32 0, i32 1 + store i64 %nSteps, i64* %38, align 4 + store %Callable* %op, %Callable** %39, align 8 + call void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____body({ i64, %Callable* }* %37, double %stepSize, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %continue__1 + +else__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i64, %Callable* }* + %42 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %41, i32 0, i32 1 + store i64 %nSteps, i64* %42, align 4 + store %Callable* %op, %Callable** %43, align 8 + call void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____body({ i64, %Callable* }* %41, double %stepSize, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____body({ i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = sub i64 %nSteps, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %4 = icmp sle i64 %idx, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { i64, double, %Array* }* + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %7, i32 0, i32 1 + %10 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %7, i32 0, i32 2 + store i64 %idx, i64* %8, align 4 + store double %5, double* %9, align 8 + store %Array* %target, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %6, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %12 = sub i64 %nSteps, 1 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idx__1 = phi i64 [ %12, %preheader__1 ], [ %22, %exiting__2 ] + %13 = icmp sle i64 %idx__1, 0 + %14 = icmp sge i64 %idx__1, 0 + %15 = select i1 false, i1 %13, i1 %14 + br i1 %15, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %16 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { i64, double, %Array* }* + %19 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %18, i32 0, i32 1 + %21 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %18, 
+define internal void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____body({ i64, %Callable* }* %0, double %stepSize, %Array* %target) {
+entry:
+ call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1)
+ %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0
+ %nSteps = load i64, i64* %1, align 4
+ %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1
+ %op = load %Callable*, %Callable** %2, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1)
+ %3 = sub i64 %nSteps, 1
+ br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+ %idx = phi i64 [ 0, %entry ], [ %11, %exiting__1 ]
+ %4 = icmp sle i64 %idx, %3
+ br i1 %4, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+ %5 = fmul double %stepSize, 5.000000e-01
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64))
+ %7 = bitcast %Tuple* %6 to { i64, double, %Array* }*
+ %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %7, i32 0, i32 0
+ %9 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %7, i32 0, i32 1
+ %10 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %7, i32 0, i32 2
+ store i64 %idx, i64* %8, align 4
+ store double %5, double* %9, align 8
+ store %Array* %target, %Array** %10, align 8
+ call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %6, %Tuple* null)
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1)
+ br label %exiting__1
+
+exiting__1: ; preds = %body__1
+ %11 = add i64 %idx, 1
+ br label %header__1
+
+exit__1: ; preds = %header__1
+ %12 = sub i64 %nSteps, 1
+ br label %preheader__1
+
+preheader__1: ; preds = %exit__1
+ br label %header__2
+
+header__2: ; preds = %exiting__2, %preheader__1
+ %idx__1 = phi i64 [ %12, %preheader__1 ], [ %22, %exiting__2 ]
+ %13 = icmp sle i64 %idx__1, 0
+ %14 = icmp sge i64 %idx__1, 0
+ %15 = select i1 false, i1 %13, i1 %14
+ br i1 %15, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+ %16 = fmul double %stepSize, 5.000000e-01
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64))
+ %18 = bitcast %Tuple* %17 to { i64, double, %Array* }*
+ %19 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %18, i32 0, i32 0
+ %20 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %18, i32 0, i32 1
+ %21 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %18, i32 0, i32 2
+ store i64 %idx__1, i64* %19, align 4
+ store double %16, double* %20, align 8
+ store %Array* %target, %Array** %21, align 8
+ call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %17, %Tuple* null)
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1)
+ br label %exiting__2
+
+exiting__2: ; preds = %body__2
+ %22 = add i64 %idx__1, -1
+ br label %header__2
+
+exit__2: ; preds = %header__2
+ call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1)
+ ret void
+}
+
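+; Trotter1ImplCA (body): one first-order Trotter step. A single forward sweep
+; over term indices 0 .. nSteps-1, applying %op with the full stepSize.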
+define internal void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____body({ i64, %Callable* }* %0, double %stepSize, %Array* %target) {
+entry:
+ call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1)
+ %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0
+ %nSteps = load i64, i64* %1, align 4
+ %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1
+ %op = load %Callable*, %Callable** %2, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1)
+ %3 = sub i64 %nSteps, 1
+ br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+ %idx = phi i64 [ 0, %entry ], [ %10, %exiting__1 ]
+ %4 = icmp sle i64 %idx, %3
+ br i1 %4, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64))
+ %6 = bitcast %Tuple* %5 to { i64, double, %Array* }*
+ %7 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %6, i32 0, i32 0
+ %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %6, i32 0, i32 1
+ %9 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %6, i32 0, i32 2
+ store i64 %idx, i64* %7, align 4
+ store double %stepSize, double* %8, align 8
+ store %Array* %target, %Array** %9, align 8
+ call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %5, %Tuple* null)
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1)
+ br label %exiting__1
+
+exiting__1: ; preds = %body__1
+ %10 = add i64 %idx, 1
+ br label %header__1
+
+exit__1: ; preds = %header__1
+ call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1)
+ ret void
+}
+
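+; Adjoint of TrotterArbitraryImplCA. For order > 2 it keeps the same five-call
+; symmetric recursion as the body (outer, outer, inner, outer, outer), with
+; each recursive call taken in its adjoint form; order 2 and order 1 dispatch
+; to the adjoint second- and first-order steps below.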
+define internal void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____adj(i64 %order, { i64, %Callable* }* %0, double %stepSize, %Array* %target) {
+entry:
+ call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1)
+ %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0
+ %nSteps = load i64, i64* %1, align 4
+ %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1
+ %op = load %Callable*, %Callable** %2, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1)
+ %3 = icmp sgt i64 %order, 2
+ br i1 %3, label %then0__1, label %test1__1
+
+then0__1: ; preds = %entry
+ %__qsVar0__stepSizeOuter__ = call double @Microsoft__Quantum__Canon____QsRef0__TrotterStepSize____body(i64 %order)
+ %4 = fmul double 4.000000e+00, %__qsVar0__stepSizeOuter__
+ %__qsVar1__stepSizeInner__ = fsub double 1.000000e+00, %4
+ %5 = sub i64 %order, 2
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %7 = bitcast %Tuple* %6 to { i64, %Callable* }*
+ %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %7, i32 0, i32 0
+ %9 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %7, i32 0, i32 1
+ store i64 %nSteps, i64* %8, align 4
+ store %Callable* %op, %Callable** %9, align 8
+ %10 = fmul double %__qsVar0__stepSizeOuter__, %stepSize
+ call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____adj(i64 %5, { i64, %Callable* }* %7, double %10, %Array* %target)
+ %11 = sub i64 %order, 2
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %13 = bitcast %Tuple* %12 to { i64, %Callable* }*
+ %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 0
+ %15 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1
+ store i64 %nSteps, i64* %14, align 4
+ store %Callable* %op, %Callable** %15, align 8
+ %16 = fmul double %__qsVar0__stepSizeOuter__, %stepSize
+ call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____adj(i64 %11, { i64, %Callable* }* %13, double %16, %Array* %target)
+ %17 = sub i64 %order, 2
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %19 = bitcast %Tuple* %18 to { i64, %Callable* }*
+ %20 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %19, i32 0, i32 0
+ %21 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %19, i32 0, i32 1
+ store i64 %nSteps, i64* %20, align 4
+ store %Callable* %op, %Callable** %21, align 8
+ %22 = fmul double %__qsVar1__stepSizeInner__, %stepSize
+ call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____adj(i64 %17, { i64, %Callable* }* %19, double %22, %Array* %target)
+ %23 = sub i64 %order, 2
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %25 = bitcast %Tuple* %24 to { i64, %Callable* }*
+ %26 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %25, i32 0, i32 0
+ %27 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %25, i32 0, i32 1
+ store i64 %nSteps, i64* %26, align 4
+ store %Callable* %op, %Callable** %27, align 8
+ %28 = fmul double %__qsVar0__stepSizeOuter__, %stepSize
+ call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____adj(i64 %23, { i64, %Callable* }* %25, double %28, %Array* %target)
+ %29 = sub i64 %order, 2
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %31 = bitcast %Tuple* %30 to { i64, %Callable* }*
+ %32 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %31, i32 0, i32 0
+ %33 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %31, i32 0, i32 1
+ store i64 %nSteps, i64* %32, align 4
+ store %Callable* %op, %Callable** %33, align 8
+ %34 = fmul double %__qsVar0__stepSizeOuter__, %stepSize
+ call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____adj(i64 %29, { i64, %Callable* }* %31, double %34, %Array* %target)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1)
+ br label %continue__1
+
+test1__1: ; preds = %entry
+ %35 = icmp eq i64 %order, 2
+ br i1 %35, label %then1__1, label %else__1
+
+then1__1: ; preds = %test1__1
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %37 = bitcast %Tuple* %36 to { i64, %Callable* }*
+ %38 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %37, i32 0, i32 0
+ %39 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %37, i32 0, i32 1
+ store i64 %nSteps, i64* %38, align 4
+ store %Callable* %op, %Callable** %39, align 8
+ call void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____adj({ i64, %Callable* }* %37, double %stepSize, %Array* %target)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1)
+ br label %continue__1
+
+else__1: ; preds = %test1__1
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %41 = bitcast %Tuple* %40 to { i64, %Callable* }*
+ %42 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %41, i32 0, i32 0
+ %43 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %41, i32 0, i32 1
+ store i64 %nSteps, i64* %42, align 4
+ store %Callable* %op, %Callable** %43, align 8
+ call void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____adj({ i64, %Callable* }* %41, double %stepSize, %Array* %target)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1)
+ br label %continue__1
+
+continue__1: ; preds = %else__1, %then1__1, %then0__1
+ call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1)
+ ret void
+}
+
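+; Adjoint of Trotter2ImplCA: replays both half-step sweeps of the body in
+; reverse index order, invoking the adjoint of %op obtained via
+; __quantum__rt__callable_make_adjoint on a copy of the callable.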
+define internal void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____adj({ i64, %Callable* }* %0, double %stepSize, %Array* %target) {
+entry:
+ call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1)
+ %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0
+ %nSteps = load i64, i64* %1, align 4
+ %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1
+ %op = load %Callable*, %Callable** %2, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1)
+ %3 = sub i64 %nSteps, 1
+ %4 = sub i64 0, %3
+ %5 = sdiv i64 %4, -1
+ %6 = mul i64 -1, %5
+ %7 = add i64 %3, %6
+ %8 = insertvalue %Range zeroinitializer, i64 %7, 0
+ %9 = insertvalue %Range %8, i64 1, 1
+ %10 = insertvalue %Range %9, i64 %3, 2
+ %11 = extractvalue %Range %10, 0
+ %12 = extractvalue %Range %10, 1
+ %13 = extractvalue %Range %10, 2
+ br label %preheader__1
+
+preheader__1: ; preds = %entry
+ %14 = icmp sgt i64 %12, 0
+ br label %header__1
+
+header__1: ; preds = %exiting__1, %preheader__1
+ %__qsVar1__idx__ = phi i64 [ %11, %preheader__1 ], [ %25, %exiting__1 ]
+ %15 = icmp sle i64 %__qsVar1__idx__, %13
+ %16 = icmp sge i64 %__qsVar1__idx__, %13
+ %17 = select i1 %14, i1 %15, i1 %16
+ br i1 %17, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+ %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1)
+ call void @__quantum__rt__callable_make_adjoint(%Callable* %18)
+ %19 = fmul double %stepSize, 5.000000e-01
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64))
+ %21 = bitcast %Tuple* %20 to { i64, double, %Array* }*
+ %22 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %21, i32 0, i32 0
+ %23 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %21, i32 0, i32 1
+ %24 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %21, i32 0, i32 2
+ store i64 %__qsVar1__idx__, i64* %22, align 4
+ store double %19, double* %23, align 8
+ store %Array* %target, %Array** %24, align 8
+ call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %20, %Tuple* null)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1)
+ br label %exiting__1
+
+exiting__1: ; preds = %body__1
+ %25 = add i64 %__qsVar1__idx__, %12
+ br label %header__1
+
+exit__1: ; preds = %header__1
+ %26 = sub i64 %nSteps, 1
+ %27 = sub i64 %26, 0
+ %28 = sdiv i64 %27, 1
+ %29 = mul i64 1, %28
+ %30 = add i64 0, %29
+ %31 = insertvalue %Range zeroinitializer, i64 %30, 0
+ %32 = insertvalue %Range %31, i64 -1, 1
+ %33 = insertvalue %Range %32, i64 0, 2
+ %34 = extractvalue %Range %33, 0
+ %35 = extractvalue %Range %33, 1
+ %36 = extractvalue %Range %33, 2
+ br label %preheader__2
+
+preheader__2: ; preds = %exit__1
+ %37 = icmp sgt i64 %35, 0
+ br label %header__2
+
+header__2: ; preds = %exiting__2, %preheader__2
+ %__qsVar0__idx__ = phi i64 [ %34, %preheader__2 ], [ %48, %exiting__2 ]
+ %38 = icmp sle i64 %__qsVar0__idx__, %36
+ %39 = icmp sge i64 %__qsVar0__idx__, %36
+ %40 = select i1 %37, i1 %38, i1 %39
+ br i1 %40, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+ %41 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %41, i32 1)
+ call void @__quantum__rt__callable_make_adjoint(%Callable* %41)
+ %42 = fmul double %stepSize, 5.000000e-01
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %43 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64))
+ %44 = bitcast %Tuple* %43 to { i64, double, %Array* }*
+ %45 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %44, i32 0, i32 0
+ %46 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %44, i32 0, i32 1
+ %47 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %44, i32 0, i32 2
+ store i64 %__qsVar0__idx__, i64* %45, align 4
+ store double %42, double* %46, align 8
+ store %Array* %target, %Array** %47, align 8
+ call void @__quantum__rt__callable_invoke(%Callable* %41, %Tuple* %43, %Tuple* null)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %41, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %41, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 -1)
+ br label %exiting__2
+
+exiting__2: ; preds = %body__2
+ %48 = add i64 %__qsVar0__idx__, %35
+ br label %header__2
+
+exit__2: ; preds = %header__2
+ call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1)
+ ret void
+}
+
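+; Adjoint of Trotter1ImplCA: sweeps the term index from nSteps-1 down to 0,
+; invoking the adjoint of %op with the full stepSize.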
+define internal void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____adj({ i64, %Callable* }* %0, double %stepSize, %Array* %target) {
+entry:
+ call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1)
+ %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0
+ %nSteps = load i64, i64* %1, align 4
+ %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1
+ %op = load %Callable*, %Callable** %2, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1)
+ %3 = sub i64 %nSteps, 1
+ %4 = sub i64 %3, 0
+ %5 = sdiv i64 %4, 1
+ %6 = mul i64 1, %5
+ %7 = add i64 0, %6
+ %8 = insertvalue %Range zeroinitializer, i64 %7, 0
+ %9 = insertvalue %Range %8, i64 -1, 1
+ %10 = insertvalue %Range %9, i64 0, 2
+ %11 = extractvalue %Range %10, 0
+ %12 = extractvalue %Range %10, 1
+ %13 = extractvalue %Range %10, 2
+ br label %preheader__1
+
+preheader__1: ; preds = %entry
+ %14 = icmp sgt i64 %12, 0
+ br label %header__1
+
+header__1: ; preds = %exiting__1, %preheader__1
+ %__qsVar0__idx__ = phi i64 [ %11, %preheader__1 ], [ %24, %exiting__1 ]
+ %15 = icmp sle i64 %__qsVar0__idx__, %13
+ %16 = icmp sge i64 %__qsVar0__idx__, %13
+ %17 = select i1 %14, i1 %15, i1 %16
+ br i1 %17, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+ %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1)
+ call void @__quantum__rt__callable_make_adjoint(%Callable* %18)
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64))
+ %20 = bitcast %Tuple* %19 to { i64, double, %Array* }*
+ %21 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %20, i32 0, i32 0
+ %22 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %20, i32 0, i32 1
+ %23 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %20, i32 0, i32 2
+ store i64 %__qsVar0__idx__, i64* %21, align 4
+ store double %stepSize, double* %22, align 8
+ store %Array* %target, %Array** %23, align 8
+ call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %19, %Tuple* null)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1)
+ br label %exiting__1
+
+exiting__1: ; preds = %body__1
+ %24 = add i64 %__qsVar0__idx__, %12
+ br label %header__1
+
+exit__1: ; preds = %header__1
+ call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1)
+ ret void
+}
+
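+; Controlled variant of TrotterArbitraryImplCA: the same recursion as the
+; body, with the argument tuple widened to carry %__controlQubits__ through
+; every recursive call.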
+define internal void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %0) {
+entry:
+ call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+ %1 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0
+ %order = load i64, i64* %1, align 4
+ %2 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1
+ %3 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8
+ %4 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2
+ %stepSize = load double, double* %4, align 8
+ %5 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 3
+ %target = load %Array*, %Array** %5, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1)
+ %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %3, i32 0, i32 0
+ %nSteps = load i64, i64* %6, align 4
+ %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %3, i32 0, i32 1
+ %op = load %Callable*, %Callable** %7, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1)
+ %8 = icmp sgt i64 %order, 2
+ br i1 %8, label %then0__1, label %test1__1
+
+then0__1: ; preds = %entry
+ %stepSizeOuter = call double @Microsoft__Quantum__Canon____QsRef0__TrotterStepSize____body(i64 %order)
+ %9 = fmul double 4.000000e+00, %stepSizeOuter
+ %stepSizeInner = fsub double 1.000000e+00, %9
+ %10 = sub i64 %order, 2
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %12 = bitcast %Tuple* %11 to { i64, %Callable* }*
+ %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 0
+ %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 1
+ store i64 %nSteps, i64* %13, align 4
+ store %Callable* %op, %Callable** %14, align 8
+ %15 = fmul double %stepSizeOuter, %stepSize
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64))
+ %17 = bitcast %Tuple* %16 to { i64, { i64, %Callable* }*, double, %Array* }*
+ %18 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 0
+ %19 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 1
+ %20 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 2
+ %21 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 3
+ store i64 %10, i64* %18, align 4
+ store { i64, %Callable* }* %12, { i64, %Callable* }** %19, align 8
+ store double %15, double* %20, align 8
+ store %Array* %target, %Array** %21, align 8
+ call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %17)
+ %22 = sub i64 %order, 2
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %24 = bitcast %Tuple* %23 to { i64, %Callable* }*
+ %25 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 0
+ %26 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 1
+ store i64 %nSteps, i64* %25, align 4
+ store %Callable* %op, %Callable** %26, align 8
+ %27 = fmul double %stepSizeOuter, %stepSize
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64))
+ %29 = bitcast %Tuple* %28 to { i64, { i64, %Callable* }*, double, %Array* }*
+ %30 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 0
+ %31 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 1
+ %32 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 2
+ %33 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 3
+ store i64 %22, i64* %30, align 4
+ store { i64, %Callable* }* %24, { i64, %Callable* }** %31, align 8
+ store double %27, double* %32, align 8
+ store %Array* %target, %Array** %33, align 8
+ call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %29)
+ %34 = sub i64 %order, 2
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %35 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %36 = bitcast %Tuple* %35 to { i64, %Callable* }*
+ %37 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %36, i32 0, i32 0
+ %38 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %36, i32 0, i32 1
+ store i64 %nSteps, i64* %37, align 4
+ store %Callable* %op, %Callable** %38, align 8
+ %39 = fmul double %stepSizeInner, %stepSize
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64))
+ %41 = bitcast %Tuple* %40 to { i64, { i64, %Callable* }*, double, %Array* }*
+ %42 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 0
+ %43 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 1
+ %44 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 2
+ %45 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 3
+ store i64 %34, i64* %42, align 4
+ store { i64, %Callable* }* %36, { i64, %Callable* }** %43, align 8
+ store double %39, double* %44, align 8
+ store %Array* %target, %Array** %45, align 8
+ call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %41)
+ %46 = sub i64 %order, 2
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %48 = bitcast %Tuple* %47 to { i64, %Callable* }*
+ %49 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %48, i32 0, i32 0
+ %50 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %48, i32 0, i32 1
+ store i64 %nSteps, i64* %49, align 4
+ store %Callable* %op, %Callable** %50, align 8
+ %51 = fmul double %stepSizeOuter, %stepSize
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %52 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64))
+ %53 = bitcast %Tuple* %52 to { i64, { i64, %Callable* }*, double, %Array* }*
+ %54 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 0
+ %55 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 1
+ %56 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 2
+ %57 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 3
+ store i64 %46, i64* %54, align 4
+ store { i64, %Callable* }* %48, { i64, %Callable* }** %55, align 8
+ store double %51, double* %56, align 8
+ store %Array* %target, %Array** %57, align 8
+ call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %53)
+ %58 = sub i64 %order, 2
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %59 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %60 = bitcast %Tuple* %59 to { i64, %Callable* }*
+ %61 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %60, i32 0, i32 0
+ %62 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %60, i32 0, i32 1
+ store i64 %nSteps, i64* %61, align 4
+ store %Callable* %op, %Callable** %62, align 8
+ %63 = fmul double %stepSizeOuter, %stepSize
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %64 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64))
+ %65 = bitcast %Tuple* %64 to { i64, { i64, %Callable* }*, double, %Array* }*
+ %66 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 0
+ %67 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 1
+ %68 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 2
+ %69 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 3
+ store i64 %58, i64* %66, align 4
+ store { i64, %Callable* }* %60, { i64, %Callable* }** %67, align 8
+ store double %63, double* %68, align 8
+ store %Array* %target, %Array** %69, align 8
+ call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %65)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %59, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %64, i32 -1)
+ br label %continue__1
+
+test1__1: ; preds = %entry
+ %70 = icmp eq i64 %order, 2
+ br i1 %70, label %then1__1, label %else__1
+
+then1__1: ; preds = %test1__1
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %71 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %72 = bitcast %Tuple* %71 to { i64, %Callable* }*
+ %73 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %72, i32 0, i32 0
+ %74 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %72, i32 0, i32 1
+ store i64 %nSteps, i64* %73, align 4
+ store %Callable* %op, %Callable** %74, align 8
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %75 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64))
+ %76 = bitcast %Tuple* %75 to { { i64, %Callable* }*, double, %Array* }*
+ %77 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 0
+ %78 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 1
+ %79 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 2
+ store { i64, %Callable* }* %72, { i64, %Callable* }** %77, align 8
+ store double %stepSize, double* %78, align 8
+ store %Array* %target, %Array** %79, align 8
+ call void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %76)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %71, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %75, i32 -1)
+ br label %continue__1
+
+else__1: ; preds = %test1__1
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %80 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %81 = bitcast %Tuple* %80 to { i64, %Callable* }*
+ %82 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %81, i32 0, i32 0
+ %83 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %81, i32 0, i32 1
+ store i64 %nSteps, i64* %82, align 4
+ store %Callable* %op, %Callable** %83, align 8
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %84 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64))
+ %85 = bitcast %Tuple* %84 to { { i64, %Callable* }*, double, %Array* }*
+ %86 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 0
+ %87 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 1
+ %88 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 2
+ store { i64, %Callable* }* %81, { i64, %Callable* }** %86, align 8
+ store double %stepSize, double* %87, align 8
+ store %Array* %target, %Array** %88, align 8
+ call void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %85)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i32 -1)
+ br label %continue__1
+
+continue__1: ; preds = %else__1, %then1__1, %then0__1
+ call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+ call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1)
+ ret void
+}
+
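+; Controlled variant of Trotter2ImplCA: both half-step sweeps invoke a copy of
+; %op made controlled via __quantum__rt__callable_make_controlled, paired with
+; the control register %__controlQubits__.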
+define internal void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %0) {
+entry:
+ call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+ %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0
+ %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8
+ %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1
+ %stepSize = load double, double* %3, align 8
+ %4 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2
+ %target = load %Array*, %Array** %4, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1)
+ %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 0
+ %nSteps = load i64, i64* %5, align 4
+ %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 1
+ %op = load %Callable*, %Callable** %6, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1)
+ %7 = sub i64 %nSteps, 1
+ br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+ %idx = phi i64 [ 0, %entry ], [ %20, %exiting__1 ]
+ %8 = icmp sle i64 %idx, %7
+ br i1 %8, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+ %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1)
+ call void @__quantum__rt__callable_make_controlled(%Callable* %9)
+ call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1)
+ %10 = fmul double %stepSize, 5.000000e-01
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64))
+ %12 = bitcast %Tuple* %11 to { i64, double, %Array* }*
+ %13 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %12, i32 0, i32 0
+ %14 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %12, i32 0, i32 1
+ %15 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %12, i32 0, i32 2
+ store i64 %idx, i64* %13, align 4
+ store double %10, double* %14, align 8
+ store %Array* %target, %Array** %15, align 8
+ %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64))
+ %17 = bitcast %Tuple* %16 to { %Array*, { i64, double, %Array* }* }*
+ %18 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %17, i32 0, i32 0
+ %19 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %17, i32 0, i32 1
+ store %Array* %__controlQubits__, %Array** %18, align 8
+ store { i64, double, %Array* }* %12, { i64, double, %Array* }** %19, align 8
+ call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %16, %Tuple* null)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1)
+ br label %exiting__1
+
+exiting__1: ; preds = %body__1
+ %20 = add i64 %idx, 1
+ br label %header__1
+
+exit__1: ; preds = %header__1
+ %21 = sub i64 %nSteps, 1
+ br label %preheader__1
+
+preheader__1: ; preds = %exit__1
+ br label %header__2
+
+header__2: ; preds = %exiting__2, %preheader__1
+ %idx__1 = phi i64 [ %21, %preheader__1 ], [ %36, %exiting__2 ]
+ %22 = icmp sle i64 %idx__1, 0
+ %23 = icmp sge i64 %idx__1, 0
+ %24 = select i1 false, i1 %22, i1 %23
+ br i1 %24, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+ %25 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %25, i32 1)
+ call void @__quantum__rt__callable_make_controlled(%Callable* %25)
+ call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1)
+ %26 = fmul double %stepSize, 5.000000e-01
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64))
+ %28 = bitcast %Tuple* %27 to { i64, double, %Array* }*
+ %29 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %28, i32 0, i32 0
+ %30 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %28, i32 0, i32 1
+ %31 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %28, i32 0, i32 2
+ store i64 %idx__1, i64* %29, align 4
+ store double %26, double* %30, align 8
+ store %Array* %target, %Array** %31, align 8
+ %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64))
+ %33 = bitcast %Tuple* %32 to { %Array*, { i64, double, %Array* }* }*
+ %34 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %33, i32 0, i32 0
+ %35 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %33, i32 0, i32 1
+ store %Array* %__controlQubits__, %Array** %34, align 8
+ store { i64, double, %Array* }* %28, { i64, double, %Array* }** %35, align 8
+ call void @__quantum__rt__callable_invoke(%Callable* %25, %Tuple* %32, %Tuple* null)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %25, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %25, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1)
+ br label %exiting__2
+
+exiting__2: ; preds = %body__2
+ %36 = add i64 %idx__1, -1
+ br label %header__2
+
+exit__2: ; preds = %header__2
+ call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+ call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1)
+ ret void
+}
+
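+; Controlled variant of Trotter1ImplCA: a single forward sweep invoking the
+; controlled form of %op.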
+define internal void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %0) {
+entry:
+ call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+ %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0
+ %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8
+ %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1
+ %stepSize = load double, double* %3, align 8
+ %4 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2
+ %target = load %Array*, %Array** %4, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1)
+ %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 0
+ %nSteps = load i64, i64* %5, align 4
+ %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 1
+ %op = load %Callable*, %Callable** %6, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1)
+ %7 = sub i64 %nSteps, 1
+ br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+ %idx = phi i64 [ 0, %entry ], [ %19, %exiting__1 ]
+ %8 = icmp sle i64 %idx, %7
+ br i1 %8, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+ %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1)
+ call void @__quantum__rt__callable_make_controlled(%Callable* %9)
+ call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64))
+ %11 = bitcast %Tuple* %10 to { i64, double, %Array* }*
+ %12 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %11, i32 0, i32 0
+ %13 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %11, i32 0, i32 1
+ %14 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %11, i32 0, i32 2
+ store i64 %idx, i64* %12, align 4
+ store double %stepSize, double* %13, align 8
+ store %Array* %target, %Array** %14, align 8
+ %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64))
+ %16 = bitcast %Tuple* %15 to { %Array*, { i64, double, %Array* }* }*
+ %17 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %16, i32 0, i32 0
+ %18 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %16, i32 0, i32 1
+ store %Array* %__controlQubits__, %Array** %17, align 8
+ store { i64, double, %Array* }* %11, { i64, double, %Array* }** %18, align 8
+ call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %15, %Tuple* null)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1)
+ br label %exiting__1
+
+exiting__1: ; preds = %body__1
+ %19 = add i64 %idx, 1
+ br label %header__1
+
+exit__1: ; preds = %header__1
+ call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+ call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1)
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1)
+ ret void
+}
+
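+; Controlled-adjoint variant of TrotterArbitraryImplCA: combines the
+; controlled argument threading with the adjoint recursion order.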
+define internal void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %0) {
+entry:
+ call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+ %1 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0
+ %order = load i64, i64* %1, align 4
+ %2 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1
+ %3 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8
+ %4 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2
+ %stepSize = load double, double* %4, align 8
+ %5 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 3
+ %target = load %Array*, %Array** %5, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1)
+ %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %3, i32 0, i32 0
+ %nSteps = load i64, i64* %6, align 4
+ %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %3, i32 0, i32 1
+ %op = load %Callable*, %Callable** %7, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1)
+ %8 = icmp sgt i64 %order, 2
+ br i1 %8, label %then0__1, label %test1__1
+
+then0__1: ; preds = %entry
+ %__qsVar0__stepSizeOuter__ = call double @Microsoft__Quantum__Canon____QsRef0__TrotterStepSize____body(i64 %order)
+ %9 = fmul double 4.000000e+00, %__qsVar0__stepSizeOuter__
+ %__qsVar1__stepSizeInner__ = fsub double 1.000000e+00, %9
+ %10 = sub i64 %order, 2
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %12 = bitcast %Tuple* %11 to { i64, %Callable* }*
+ %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 0
+ %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 1
+ store i64 %nSteps, i64* %13, align 4
+ store %Callable* %op, %Callable** %14, align 8
+ %15 = fmul double %__qsVar0__stepSizeOuter__, %stepSize
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64))
+ %17 = bitcast %Tuple* %16 to { i64, { i64, %Callable* }*, double, %Array* }*
+ %18 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 0
+ %19 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 1
+ %20 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 2
+ %21 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 3
+ store i64 %10, i64* %18, align 4
+ store { i64, %Callable* }* %12, { i64, %Callable* }** %19, align 8
+ store double %15, double* %20, align 8
+ store %Array* %target, %Array** %21, align 8
+ call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %17)
+ %22 = sub i64 %order, 2
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %24 = bitcast %Tuple* %23 to { i64, %Callable* }*
+ %25 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 0
+ %26 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 1
+ store i64 %nSteps, i64* %25, align 4
+ store %Callable* %op, %Callable** %26, align 8
+ %27 = fmul double %__qsVar0__stepSizeOuter__, %stepSize
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64))
+ %29 = bitcast %Tuple* %28 to { i64, { i64, %Callable* }*, double, %Array* }*
+ %30 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 0
+ %31 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 1
+ %32 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 2
+ %33 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 3
+ store i64 %22, i64* %30, align 4
+ store { i64, %Callable* }* %24, { i64, %Callable* }** %31, align 8
+ store double %27, double* %32, align 8
+ store %Array* %target, %Array** %33, align 8
+ call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %29)
+ %34 = sub i64 %order, 2
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %35 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %36 = bitcast %Tuple* %35 to { i64, %Callable* }*
+ %37 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %36, i32 0, i32 0
+ %38 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %36, i32 0, i32 1
+ store i64 %nSteps, i64* %37, align 4
+ store %Callable* %op, %Callable** %38, align 8
+ %39 = fmul double %__qsVar1__stepSizeInner__, %stepSize
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64))
+ %41 = bitcast %Tuple* %40 to { i64, { i64, %Callable* }*, double, %Array* }*
+ %42 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 0
+ %43 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 1
+ %44 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 2
+ %45 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 3
+ store i64 %34, i64* %42, align 4
+ store { i64, %Callable* }* %36, { i64, %Callable* }** %43, align 8
+ store double %39, double* %44, align 8
+ store %Array* %target, %Array** %45, align 8
+ call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %41)
+ %46 = sub i64 %order, 2
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %48 = bitcast %Tuple* %47 to { i64, %Callable* }*
+ %49 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %48, i32 0, i32 0
+ %50 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %48, i32 0, i32 1
+ store i64 %nSteps, i64* %49, align 4
+ store %Callable* %op, %Callable** %50, align 8
+ %51 = fmul double %__qsVar0__stepSizeOuter__, %stepSize
+ call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1)
+ %52 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64))
+ %53 = bitcast %Tuple* %52 to { i64, { i64, %Callable* }*, double, %Array* }*
+ %54 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 0
+ %55 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 1
+ %56 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 2
+ %57 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 3
+ store i64 %46, i64* %54, align 4
+ store { i64, %Callable* }* %48, { i64, %Callable* }** %55, align 8
+ store double %51, double* %56, align 8
+ store %Array* %target, %Array** %57, align 8
+ call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %53)
+ %58 = sub i64 %order, 2
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1)
+ %59 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+ %60 = bitcast %Tuple* %59 to { i64, %Callable* }*
+ %61 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %60, i32 0, i32 0
+ %62 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %60, i32 0, i32 1
+
store i64 %nSteps, i64* %61, align 4 + store %Callable* %op, %Callable** %62, align 8 + %63 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %64 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %65 = bitcast %Tuple* %64 to { i64, { i64, %Callable* }*, double, %Array* }* + %66 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 0 + %67 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 1 + %68 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 2 + %69 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 3 + store i64 %58, i64* %66, align 4 + store { i64, %Callable* }* %60, { i64, %Callable* }** %67, align 8 + store double %63, double* %68, align 8 + store %Array* %target, %Array** %69, align 8 + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %65) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %59, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %64, i32 -1) + br label 
%continue__1 + +test1__1: ; preds = %entry + %70 = icmp eq i64 %order, 2 + br i1 %70, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %71 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %72 = bitcast %Tuple* %71 to { i64, %Callable* }* + %73 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %72, i32 0, i32 0 + %74 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %72, i32 0, i32 1 + store i64 %nSteps, i64* %73, align 4 + store %Callable* %op, %Callable** %74, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %75 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %76 = bitcast %Tuple* %75 to { { i64, %Callable* }*, double, %Array* }* + %77 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 0 + %78 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 1 + %79 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 2 + store { i64, %Callable* }* %72, { i64, %Callable* }** %77, align 8 + store double %stepSize, double* %78, align 8 + store %Array* %target, %Array** %79, align 8 + call void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %76) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %71, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %75, i32 -1) + br label %continue__1 + +else__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %80 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %81 = bitcast %Tuple* %80 to { i64, %Callable* }* + %82 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %81, i32 0, i32 0 + %83 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %81, i32 0, i32 1 + store i64 %nSteps, i64* %82, align 4 + store %Callable* %op, %Callable** %83, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %84 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %85 = bitcast %Tuple* %84 to { { i64, %Callable* }*, double, %Array* }* + %86 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* 
%85, i32 0, i32 0 + %87 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 1 + %88 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 2 + store { i64, %Callable* }* %81, { i64, %Callable* }** %86, align 8 + store double %stepSize, double* %87, align 8 + store %Array* %target, %Array** %88, align 8 + call void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %85) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %3, align 8 + %4 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 0 + %nSteps = load i64, i64* %5, align 4 + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 1 + %op = load %Callable*, %Callable** %6, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %7 = sub i64 %nSteps, 1 + %8 = sub i64 0, %7 + %9 = sdiv i64 %8, -1 + %10 = mul i64 -1, %9 + %11 = add i64 %7, %10 + %12 = insertvalue %Range zeroinitializer, i64 %11, 0 + %13 = insertvalue %Range %12, i64 1, 1 + %14 = insertvalue %Range %13, i64 %7, 2 + %15 = extractvalue %Range %14, 0 + %16 = extractvalue %Range %14, 1 + %17 = extractvalue %Range %14, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %18 = icmp sgt i64 %16, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar1__idx__ = phi i64 [ %15, %preheader__1 ], [ %33, %exiting__1 ] + %19 = icmp sle i64 %__qsVar1__idx__, %17 + %20 = icmp sge i64 %__qsVar1__idx__, %17 + %21 = select i1 %18, i1 %19, i1 %20 + br i1 %21, label 
%body__1, label %exit__1 + +body__1: ; preds = %header__1 + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %23 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { i64, double, %Array* }* + %26 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %25, i32 0, i32 1 + %28 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %25, i32 0, i32 2 + store i64 %__qsVar1__idx__, i64* %26, align 4 + store double %23, double* %27, align 8 + store %Array* %target, %Array** %28, align 8 + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %30 = bitcast %Tuple* %29 to { %Array*, { i64, double, %Array* }* }* + %31 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %30, i32 0, i32 0 + %32 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %30, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %31, align 8 + store { i64, double, %Array* }* %25, { i64, double, %Array* }** %32, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %29, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %33 = add i64 %__qsVar1__idx__, %16 + br label %header__1 + +exit__1: ; preds = %header__1 + %34 = sub i64 %nSteps, 1 + %35 = sub i64 %34, 0 + %36 = sdiv i64 %35, 1 + %37 = mul i64 1, %36 + %38 = add i64 0, %37 + %39 = insertvalue %Range zeroinitializer, i64 %38, 0 + %40 = insertvalue %Range %39, i64 -1, 1 + %41 = insertvalue %Range %40, i64 0, 2 + %42 = extractvalue %Range %41, 0 + %43 = extractvalue %Range %41, 1 + %44 = extractvalue %Range %41, 2 + br label %preheader__2 + +preheader__2: ; preds = %exit__1 + %45 = icmp sgt i64 %43, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__2 + %__qsVar0__idx__ = phi i64 [ %42, %preheader__2 ], [ %60, %exiting__2 ] + %46 = icmp sle i64 %__qsVar0__idx__, %44 + %47 = icmp sge i64 %__qsVar0__idx__, %44 + %48 = select i1 %45, i1 %46, i1 %47 + br i1 %48, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %49 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + 
call void @__quantum__rt__capture_update_reference_count(%Callable* %49, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %49) + call void @__quantum__rt__callable_make_controlled(%Callable* %49) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %50 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %51 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %52 = bitcast %Tuple* %51 to { i64, double, %Array* }* + %53 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %52, i32 0, i32 0 + %54 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %52, i32 0, i32 1 + %55 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %52, i32 0, i32 2 + store i64 %__qsVar0__idx__, i64* %53, align 4 + store double %50, double* %54, align 8 + store %Array* %target, %Array** %55, align 8 + %56 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %57 = bitcast %Tuple* %56 to { %Array*, { i64, double, %Array* }* }* + %58 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %57, i32 0, i32 0 + %59 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %57, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %58, align 8 + store { i64, double, %Array* }* %52, { i64, double, %Array* }** %59, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %49, %Tuple* %56, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %49, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %49, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %51, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %60 = add i64 %__qsVar0__idx__, %43 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %3, align 8 + %4 = 
getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 0 + %nSteps = load i64, i64* %5, align 4 + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 1 + %op = load %Callable*, %Callable** %6, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %7 = sub i64 %nSteps, 1 + %8 = sub i64 %7, 0 + %9 = sdiv i64 %8, 1 + %10 = mul i64 1, %9 + %11 = add i64 0, %10 + %12 = insertvalue %Range zeroinitializer, i64 %11, 0 + %13 = insertvalue %Range %12, i64 -1, 1 + %14 = insertvalue %Range %13, i64 0, 2 + %15 = extractvalue %Range %14, 0 + %16 = extractvalue %Range %14, 1 + %17 = extractvalue %Range %14, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %18 = icmp sgt i64 %16, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idx__ = phi i64 [ %15, %preheader__1 ], [ %32, %exiting__1 ] + %19 = icmp sle i64 %__qsVar0__idx__, %17 + %20 = icmp sge i64 %__qsVar0__idx__, %17 + %21 = select i1 %18, i1 %19, i1 %20 + br i1 %21, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { i64, double, %Array* }* + %25 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %24, i32 0, i32 1 + %27 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %24, i32 0, i32 2 + store i64 %__qsVar0__idx__, i64* %25, align 4 + store double %stepSize, double* %26, align 8 + store %Array* %target, %Array** %27, align 8 + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { %Array*, { i64, double, %Array* }* }* + %30 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %29, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %30, align 8 + store { i64, double, %Array* }* %24, { i64, double, %Array* }** %31, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %28, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 
-1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %32 = add i64 %__qsVar0__idx__, %16 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__body(%Array* %bits, %Callable* %oracle, %Array* %controlRegister, %Qubit* %targetRegister) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %2 = icmp sle i64 %0, %1 + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @7, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %3) + %4 = sub i64 %0, 1 + %5 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %4, 2 + %controlSubregister = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %5, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Qubit* }* + %9 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %8, i32 0, i32 1 + store %Array* %controlSubregister, %Array** %9, align 8 + store %Qubit* %targetRegister, %Qubit** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %actual, %String* %message) { +entry: + %0 = xor i1 %actual, true + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__string_update_reference_count(%String* %message, i32 1) + call void @__quantum__rt__fail(%String* %message) + unreachable + +continue__1: ; preds = %entry + ret void +} + +define internal void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__adj(%Array* %bits, %Callable* %oracle, %Array* %controlRegister, %Qubit* %targetRegister) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %2 = icmp sle i64 %0, %1 + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @7, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %3) + %4 = sub i64 %0, 1 + %5 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %4, 2 + %__qsVar0__controlSubregister__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %5, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Qubit* }* + %9 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %8, i32 0, i32 1 + store %Array* %__qsVar0__controlSubregister__, %Array** %9, align 8 + store %Qubit* %targetRegister, %Qubit** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + call void 
@__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__ctl(%Array* %__controlQubits__, { %Array*, %Callable*, %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %bits = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %targetRegister = load %Qubit*, %Qubit** %4, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %7 = icmp sle i64 %5, %6 + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @7, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %7, %String* %8) + %9 = sub i64 %5, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %controlSubregister = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %10, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Qubit* }* + %14 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 1 + store %Array* %controlSubregister, %Array** %14, align 8 + store %Qubit* %targetRegister, %Qubit** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { %Array*, %Qubit* }* }* + %18 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { %Array*, %Qubit* }* %13, { %Array*, %Qubit* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %16, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__ctladj(%Array* %__controlQubits__, { %Array*, %Callable*, %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %bits = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* 
%oracle, i32 1) + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %targetRegister = load %Qubit*, %Qubit** %4, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %7 = icmp sle i64 %5, %6 + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @7, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %7, %String* %8) + %9 = sub i64 %5, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %__qsVar0__controlSubregister__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %10, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_adjoint(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Qubit* }* + %14 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 1 + store %Array* %__qsVar0__controlSubregister__, %Array** %14, align 8 + store %Qubit* %targetRegister, %Qubit** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { %Array*, %Qubit* }* }* + %18 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { %Array*, %Qubit* }* %13, { %Array*, %Qubit* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %16, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__body(%Array* %bits, %Callable* %oracle, %Array* %controlRegister, %Array* %targetRegister) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %2 = icmp sle i64 %0, %1 + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @7, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %3) + %4 = sub i64 %0, 1 + %5 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %4, 2 + %controlSubregister = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %5, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %controlSubregister, %Array** %9, align 8 + store %Array* %targetRegister, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + call void 
@__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__adj(%Array* %bits, %Callable* %oracle, %Array* %controlRegister, %Array* %targetRegister) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %2 = icmp sle i64 %0, %1 + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @7, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %3) + %4 = sub i64 %0, 1 + %5 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %4, 2 + %__qsVar0__controlSubregister__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %5, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %__qsVar0__controlSubregister__, %Array** %9, align 8 + store %Array* %targetRegister, %Array** 
%10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__ctl(%Array* %__controlQubits__, { %Array*, %Callable*, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %bits = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %targetRegister = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %7 = icmp sle i64 %5, %6 + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @7, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %7, %String* %8) + %9 = sub i64 %5, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %controlSubregister = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %10, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 1) + call void 
@Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Array* }* + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 1 + store %Array* %controlSubregister, %Array** %14, align 8 + store %Array* %targetRegister, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { %Array*, %Array* }* }* + %18 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { %Array*, %Array* }* %13, { %Array*, %Array* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %16, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__ctladj(%Array* %__controlQubits__, { %Array*, %Callable*, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %bits = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %targetRegister = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %7 = icmp sle i64 %5, %6 + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @7, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %7, %String* %8) + %9 = sub i64 %5, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %__qsVar0__controlSubregister__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %10, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_adjoint(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Array* }* + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 1 + store %Array* %__qsVar0__controlSubregister__, %Array** %14, align 8 + store %Array* %targetRegister, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, 
{ %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { %Array*, %Array* }* }* + %18 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { %Array*, %Array* }* %13, { %Array*, %Array* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %16, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal { double, double }* @Microsoft__Quantum__Canon___516334c53dfb4d4b89cd46336a852347___QsRef0__ComposedOutput____body(%Callable* %outer, %Callable* %inner, double %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outer, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outer, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inner, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inner, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { double }* + %2 = getelementptr inbounds { double }, { double }* %1, i32 0, i32 0 + store double %target, double* %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %inner, %Tuple* %0, %Tuple* %3) + %4 = bitcast %Tuple* %3 to { double }* + %5 = getelementptr inbounds { double }, { double }* %4, i32 0, i32 0 + %6 = load double, double* %5, align 8 + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { double }* + %9 = 
getelementptr inbounds { double }, { double }* %8, i32 0, i32 0 + store double %6, double* %9, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }* }* getelementptr ({ { double, double }* }, { { double, double }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %outer, %Tuple* %7, %Tuple* %10) + %11 = bitcast %Tuple* %10 to { { double, double }* }* + %12 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %11, i32 0, i32 0 + %13 = load { double, double }*, { double, double }** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %outer, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outer, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inner, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inner, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret { double, double }* %13 +} + +define internal %Callable* @Microsoft__Quantum__Canon___883eb98596ed49bcbde64b1b9d9f4b25_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Callable* }* getelementptr ({ %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Array*, %Callable* }* + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store %Array* %bits, %Array** %4, align 8 + store %Callable* %oracle, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__16__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__6__FunctionTable, %Tuple* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + ret %Callable* %6 +} + +define internal void @Lifted__PartialApplication__16__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* 
%result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 1 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Qubit* }* getelementptr ({ %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Callable*, %Array*, %Qubit* }* + %12 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 3 + store %Array* %2, %Array** %12, align 8 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__16__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 1 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Qubit* }* getelementptr ({ %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Callable*, %Array*, %Qubit* }* + %12 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, 
%Callable*, %Array*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 3 + store %Array* %2, %Array** %12, align 8 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__16__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Qubit* }*, { %Array*, %Qubit* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Qubit* }* getelementptr ({ %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Callable*, %Array*, %Qubit* }* + %16 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 3 + store %Array* %7, %Array** %16, align 8 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Qubit* %13, %Qubit** %19, align 8 + %20 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* + %22 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { %Array*, %Callable*, %Array*, %Qubit* }* %15, { %Array*, %Callable*, %Array*, %Qubit* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__16__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Qubit* }*, { %Array*, %Qubit* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Qubit* }* getelementptr ({ %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Callable*, %Array*, %Qubit* }* + %16 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Array*, %Callable*, %Array*, 
%Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 3 + store %Array* %7, %Array** %16, align 8 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Qubit* %13, %Qubit** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* + %22 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { %Array*, %Callable*, %Array*, %Qubit* }* %15, { %Array*, %Callable*, %Array*, %Qubit* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Callable*, %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %5 = load %Array*, %Array** %1, align 8 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__body(%Array* %5, %Callable* %6, %Array* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Callable*, 
%Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %5 = load %Array*, %Array** %1, align 8 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__adj(%Array* %5, %Callable* %6, %Array* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Callable*, %Array*, %Qubit* }*, { %Array*, %Callable*, %Array*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__ctl(%Array* %3, { %Array*, %Callable*, %Array*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Callable*, %Array*, %Qubit* }*, { %Array*, %Callable*, %Array*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__ctladj(%Array* %3, { %Array*, %Callable*, %Array*, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__6__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__6__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___62ea9479a8404884bdf32c0866eaa1a0_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Callable* }* getelementptr ({ %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Array*, %Callable* }* + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store %Array* %bits, %Array** %4, align 8 + store %Callable* %oracle, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, 
%Tuple*, %Tuple*)*]* @PartialApplication__17__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__7__FunctionTable, %Tuple* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + ret %Callable* %6 +} + +define internal void @Lifted__PartialApplication__17__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 1 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Array* }* getelementptr ({ %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Callable*, %Array*, %Array* }* + %12 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 3 + store %Array* %2, %Array** %12, align 8 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__17__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 1 + %9 = load %Array*, %Array** %8, align 8 + %10 = 
call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Array* }* getelementptr ({ %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Callable*, %Array*, %Array* }* + %12 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 3 + store %Array* %2, %Array** %12, align 8 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__17__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Array* }* getelementptr ({ %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Callable*, %Array*, %Array* }* + %16 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, 
%Array*, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 3 + store %Array* %7, %Array** %16, align 8 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { %Array*, %Callable*, %Array*, %Array* }* %15, { %Array*, %Callable*, %Array*, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__17__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Array* }* getelementptr ({ %Array*, 
%Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Callable*, %Array*, %Array* }* + %16 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 3 + store %Array* %7, %Array** %16, align 8 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { %Array*, %Callable*, %Array*, %Array* }* %15, { %Array*, %Callable*, %Array*, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %5 = load %Array*, %Array** %1, align 8 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load 
%Array*, %Array** %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__body(%Array* %5, %Callable* %6, %Array* %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %5 = load %Array*, %Array** %1, align 8 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__adj(%Array* %5, %Callable* %6, %Array* %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Callable*, %Array*, %Array* }*, { %Array*, %Callable*, %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__ctl(%Array* %3, { %Array*, %Callable*, %Array*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Callable*, %Array*, %Array* }*, { %Array*, %Callable*, %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__ctladj(%Array* %3, { %Array*, %Callable*, %Array*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__7__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, 
%Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__7__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___aa681116ffc3482eb00c223eb7ada15f_Compose__body(%Callable* %outer, %Callable* %inner) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outer, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outer, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inner, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inner, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___516334c53dfb4d4b89cd46336a852347___QsRef0__ComposedOutput____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %outer, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %outer, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %inner, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %inner, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* null, i32 
1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Callable*, %Callable* }* + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store %Callable* %outer, %Callable** %4, align 8 + store %Callable* %inner, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__18__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__8__FunctionTable, %Tuple* %1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outer, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outer, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inner, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inner, i32 -1) + ret %Callable* %6 +} + +define internal void @Lifted__PartialApplication__18__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { double }* + %6 = getelementptr inbounds { double }, { double }* %5, i32 0, i32 0 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, double }* getelementptr ({ %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Callable*, double }* + %10 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %9, i32 0, i32 2 + store %Callable* %2, %Callable** %10, align 8 + store %Callable* %4, %Callable** %11, align 8 + store double %7, double* %12, align 8 + %13 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___516334c53dfb4d4b89cd46336a852347___QsRef0__ComposedOutput____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %0, 
i32 0, i32 1 + %3 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %1, align 8 + %5 = load %Callable*, %Callable** %2, align 8 + %6 = load double, double* %3, align 8 + %7 = call { double, double }* @Microsoft__Quantum__Canon___516334c53dfb4d4b89cd46336a852347___QsRef0__ComposedOutput____body(%Callable* %4, %Callable* %5, double %6) + %8 = bitcast %Tuple* %result-tuple to { { double, double }* }* + %9 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %8, i32 0, i32 0 + store { double, double }* %7, { double, double }** %9, align 8 + ret void +} + +define internal void @MemoryManagement__8__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__8__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %number, i64 %bits) { +entry: + %tempInt = 
alloca i64, align 8 + %outputBits = alloca %Array*, align 8 + %0 = icmp sge i64 %bits, 0 + br i1 %0, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %entry + %1 = icmp sle i64 %bits, 63 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %entry + %2 = phi i1 [ %1, %condTrue__1 ], [ %0, %entry ] + %3 = trunc i64 %bits to i32 + %4 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %3) + %5 = fptosi double %4 to i64 + %6 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([33 x i8], [33 x i8]* @19, i32 0, i32 0)) + %7 = call %String* @__quantum__rt__int_to_string(i64 %5) + %8 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %7) + call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %8) + %9 = icmp slt i64 %bits, 63 + br i1 %9, label %condTrue__2, label %condFalse__1 + +condTrue__2: ; preds = %condContinue__1 + %10 = shl i64 1, %bits + br label %condContinue__2 + +condFalse__1: ; preds = %condContinue__1 + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__1, %condTrue__2 + %max = phi i64 [ %10, %condTrue__2 ], [ 9223372036854775807, %condFalse__1 ] + %11 = icmp sge i64 %number, 0 + br i1 %11, label %condTrue__3, label %condContinue__3 + +condTrue__3: ; preds = %condContinue__2 + %12 = icmp sle i64 %number, %max + br label %condContinue__3 + +condContinue__3: ; preds = %condTrue__3, %condContinue__2 + %13 = phi i1 [ %12, %condTrue__3 ], [ %11, %condContinue__2 ] + %14 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([34 x i8], [34 x i8]* @20, i32 0, i32 0)) + %15 = call %String* @__quantum__rt__int_to_string(i64 %bits) + %16 = call %String* @__quantum__rt__string_concatenate(%String* %14, %String* %15) + call void @__quantum__rt__string_update_reference_count(%String* %14, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + %17 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @21, i32 0, i32 0)) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__int_to_string(i64 %number) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + %21 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @6, i32 0, i32 0)) + %22 = call %String* @__quantum__rt__string_concatenate(%String* %20, %String* %21) + call void @__quantum__rt__string_update_reference_count(%String* %20, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %21, i32 -1) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %13, %String* %22) + %23 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %bits) + %24 = sub i64 %bits, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %condContinue__3 + %25 = phi i64 [ 0, %condContinue__3 ], [ %29, %exiting__1 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__1, label %exit__1 + +body__1: ; preds = 
%header__1 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %25) + %28 = bitcast i8* %27 to i1* + store i1 false, i1* %28, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %29 = add i64 %25, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %23, %Array** %outputBits, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + store i64 %number, i64* %tempInt, align 4 + %30 = sub i64 %bits, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idxBit = phi i64 [ 0, %exit__1 ], [ %41, %exiting__2 ] + %31 = icmp sle i64 %idxBit, %30 + br i1 %31, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %32 = load %Array*, %Array** %outputBits, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = load i64, i64* %tempInt, align 4 + %35 = srem i64 %34, 2 + %36 = icmp eq i64 %35, 0 + %37 = select i1 %36, i1 false, i1 true + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxBit) + %39 = bitcast i8* %38 to i1* + store i1 %37, i1* %39, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %outputBits, align 8 + %40 = sdiv i64 %34, 2 + store i64 %40, i64* %tempInt, align 4 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %41 = add i64 %idxBit, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %42 = load %Array*, %Array** %outputBits, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %22, i32 -1) + ret %Array* %42 +} + +define internal void @Microsoft__Quantum__Canon___c2c9de63210e4d19860498010c1645f5_ApplyToEachCA__body(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call %Range @Microsoft__Quantum__Arrays___cf7bb862dc544cd083b9ebf7b65b7b76_IndexRange__body(%Array* %register) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %4 = icmp sgt i64 %2, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxQubit = phi i64 [ %1, %preheader__1 ], [ %14, %exiting__1 ] + %5 = icmp sle i64 %idxQubit, %3 + %6 = icmp sge i64 %idxQubit, %3 + %7 = select i1 %4, i1 %5, i1 %6 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Qubit* }* + %13 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %12, i32 0, i32 0 + store %Qubit* %10, %Qubit** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %singleElementOperation, %Tuple* %11, %Tuple* null) 
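+ ; Loop body of ApplyToEachCA: each qubit pointer loaded from %register is boxed into a one-element tuple for __quantum__rt__callable_invoke; the tuple's reference count is released right after the invoke, and the alias counts taken at entry are dropped again in exit__1.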
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %14 = add i64 %idxQubit, %2 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___c2c9de63210e4d19860498010c1645f5_ApplyToEachCA__adj(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call %Range @Microsoft__Quantum__Arrays___cf7bb862dc544cd083b9ebf7b65b7b76_IndexRange__body(%Array* %register) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + %4 = sub i64 %3, %1 + %5 = sdiv i64 %4, %2 + %6 = mul i64 %2, %5 + %7 = add i64 %1, %6 + %8 = sub i64 0, %2 + %9 = insertvalue %Range zeroinitializer, i64 %7, 0 + %10 = insertvalue %Range %9, i64 %8, 1 + %11 = insertvalue %Range %10, i64 %1, 2 + %12 = extractvalue %Range %11, 0 + %13 = extractvalue %Range %11, 1 + %14 = extractvalue %Range %11, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %15 = icmp sgt i64 %13, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %12, %preheader__1 ], [ %26, %exiting__1 ] + %16 = icmp sle i64 %__qsVar0__idxQubit__, %14 + %17 = icmp sge i64 %__qsVar0__idxQubit__, %14 + %18 = select i1 %15, i1 %16, i1 %17 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %19) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %21 = bitcast i8* %20 to %Qubit** + %22 = load %Qubit*, %Qubit** %21, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Qubit* }* + %25 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %24, i32 0, i32 0 + store %Qubit* %22, %Qubit** %25, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %19, %Tuple* %23, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %26 = add i64 %__qsVar0__idxQubit__, %13 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Canon___c2c9de63210e4d19860498010c1645f5_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %3 = call %Range @Microsoft__Quantum__Arrays___cf7bb862dc544cd083b9ebf7b65b7b76_IndexRange__body(%Array* %register) + %4 = extractvalue %Range %3, 0 + %5 = extractvalue %Range %3, 1 + %6 = extractvalue %Range %3, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %7 = icmp sgt i64 %5, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxQubit = phi i64 [ %4, %preheader__1 ], [ %19, %exiting__1 ] + %8 = icmp sle i64 %idxQubit, %6 + %9 = icmp sge i64 %idxQubit, %6 + %10 = select i1 %7, i1 %8, i1 %9 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, %Qubit* }* + %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %16, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %17, align 8 + store %Qubit* %14, %Qubit** %18, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %15, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %idxQubit, %5 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___c2c9de63210e4d19860498010c1645f5_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { 
%Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %3 = call %Range @Microsoft__Quantum__Arrays___cf7bb862dc544cd083b9ebf7b65b7b76_IndexRange__body(%Array* %register) + %4 = extractvalue %Range %3, 0 + %5 = extractvalue %Range %3, 1 + %6 = extractvalue %Range %3, 2 + %7 = sub i64 %6, %4 + %8 = sdiv i64 %7, %5 + %9 = mul i64 %5, %8 + %10 = add i64 %4, %9 + %11 = sub i64 0, %5 + %12 = insertvalue %Range zeroinitializer, i64 %10, 0 + %13 = insertvalue %Range %12, i64 %11, 1 + %14 = insertvalue %Range %13, i64 %4, 2 + %15 = extractvalue %Range %14, 0 + %16 = extractvalue %Range %14, 1 + %17 = extractvalue %Range %14, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %18 = icmp sgt i64 %16, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %15, %preheader__1 ], [ %30, %exiting__1 ] + %19 = icmp sle i64 %__qsVar0__idxQubit__, %17 + %20 = icmp sge i64 %__qsVar0__idxQubit__, %17 + %21 = select i1 %18, i1 %19, i1 %20 + br i1 %21, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %24 = bitcast i8* %23 to %Qubit** + %25 = load %Qubit*, %Qubit** %24, align 8 + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Array*, %Qubit* }* + %28 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %27, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %28, align 8 + store %Qubit* %25, %Qubit** %29, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %26, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %30 = add i64 %__qsVar0__idxQubit__, %16 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___d9c24574d9ed4a4aba478cabe8323707_DecomposedIntoTimeStepsCA__body({ i64, %Callable* }* %0, i64 %trotterOrder) { +entry: + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = icmp eq i64 %trotterOrder, 1 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Callable* }* getelementptr ({ %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, i64, %Callable* }* + %7 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %6, i32 0, i32 2 + store %Callable* %4, %Callable** %7, align 8 + store i64 %nSteps, i64* %8, align 4 + store %Callable* %op, %Callable** %9, align 8 + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__19__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__9__FunctionTable, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret %Callable* %10 + +test1__1: ; preds = %entry + %11 = icmp eq i64 %trotterOrder, 2 + br i1 %11, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Callable* }* getelementptr ({ %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Callable*, i64, %Callable* }* + %15 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %14, i32 0, i32 0 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, 
i64, %Callable* }* %14, i32 0, i32 1 + %17 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %14, i32 0, i32 2 + store %Callable* %12, %Callable** %15, align 8 + store i64 %nSteps, i64* %16, align 4 + store %Callable* %op, %Callable** %17, align 8 + %18 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__20__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__9__FunctionTable, %Tuple* %13) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret %Callable* %18 + +test2__1: ; preds = %test1__1 + %19 = srem i64 %trotterOrder, 2 + %20 = icmp eq i64 %19, 0 + br i1 %20, label %then2__1, label %else__1 + +then2__1: ; preds = %test2__1 + %21 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, i64, %Callable* }* getelementptr ({ %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* null, i32 1) to i64)) + %23 = bitcast %Tuple* %22 to { %Callable*, i64, i64, %Callable* }* + %24 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %23, i32 0, i32 0 + %25 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %23, i32 0, i32 1 + %26 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %23, i32 0, i32 2 + %27 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %23, i32 0, i32 3 + store %Callable* %21, %Callable** %24, align 8 + store i64 %trotterOrder, i64* %25, align 4 + store i64 %nSteps, i64* %26, align 4 + store %Callable* %op, %Callable** %27, align 8 + %28 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__21__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__10__FunctionTable, %Tuple* %22) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret %Callable* %28 + +else__1: ; preds = %test2__1 + %29 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @8, i32 0, i32 0)) + %30 = call %String* @__quantum__rt__int_to_string(i64 %trotterOrder) + %31 = call %String* @__quantum__rt__string_concatenate(%String* %29, %String* %30) + call void @__quantum__rt__string_update_reference_count(%String* %29, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %30, i32 -1) + %32 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([20 x i8], [20 x i8]* @9, i32 0, i32 0)) + %33 = call %String* @__quantum__rt__string_concatenate(%String* %31, %String* %32) + call void @__quantum__rt__string_update_reference_count(%String* %31, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %32, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) 
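+ ; Trotter orders 1, 2, and arbitrary even orders are dispatched above; any remaining odd order reaches this branch, which concatenates a diagnostic message and aborts through __quantum__rt__fail, leaving the trailing continue__1 block unreachable.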
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__fail(%String* %33) + unreachable + +continue__1: ; No predecessors! + unreachable +} + +define internal void @Lifted__PartialApplication__19__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Callable* }* + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + store i64 %2, i64* %7, align 4 + store %Callable* %4, %Callable** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { i64, %Callable* }*, double, %Array* }* + %16 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 2 + store { i64, %Callable* }* %6, { i64, %Callable* }** %16, align 8 + store double %11, double* %17, align 8 + store %Array* %13, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %14, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__19__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Callable* }* + %7 
= getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + store i64 %2, i64* %7, align 4 + store %Callable* %4, %Callable** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { i64, %Callable* }*, double, %Array* }* + %16 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 2 + store { i64, %Callable* }* %6, { i64, %Callable* }** %16, align 8 + store double %11, double* %17, align 8 + store %Array* %13, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %14, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__19__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable* }* + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, 
%Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + store i64 %7, i64* %12, align 4 + store %Callable* %9, %Callable** %13, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %15 = load double, double* %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { { i64, %Callable* }*, double, %Array* }* + %20 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 2 + store { i64, %Callable* }* %11, { i64, %Callable* }** %20, align 8 + store double %15, double* %21, align 8 + store %Array* %17, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { { i64, %Callable* }*, double, %Array* }* %19, { { i64, %Callable* }*, double, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__19__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, 
%Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable* }* + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + store i64 %7, i64* %12, align 4 + store %Callable* %9, %Callable** %13, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %15 = load double, double* %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { { i64, %Callable* }*, double, %Array* }* + %20 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 2 + store { i64, %Callable* }* %11, { i64, %Callable* }** %20, align 8 + store double %15, double* %21, align 8 + store %Array* %17, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { { i64, %Callable* }*, double, %Array* }* %19, { { i64, %Callable* }*, double, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %29) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* 
%18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____body({ i64, %Callable* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____adj({ i64, %Callable* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, double, %Array* }*, { { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctl(%Array* %3, { { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr 
inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, double, %Array* }*, { { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctladj(%Array* %3, { { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__9__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__9__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__20__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Callable* }* + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + store i64 %2, i64* %7, align 
4 + store %Callable* %4, %Callable** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { i64, %Callable* }*, double, %Array* }* + %16 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 2 + store { i64, %Callable* }* %6, { i64, %Callable* }** %16, align 8 + store double %11, double* %17, align 8 + store %Array* %13, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %14, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__20__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Callable* }* + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + store i64 %2, i64* %7, align 4 + store %Callable* %4, %Callable** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { i64, %Callable* }*, double, %Array* }* + %16 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, 
i32 1 + %18 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 2 + store { i64, %Callable* }* %6, { i64, %Callable* }** %16, align 8 + store double %11, double* %17, align 8 + store %Array* %13, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %14, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__20__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable* }* + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + store i64 %7, i64* %12, align 4 + store %Callable* %9, %Callable** %13, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %15 = load double, double* %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { { i64, %Callable* }*, double, %Array* }* + %20 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 2 + store { i64, %Callable* }* %11, { 
i64, %Callable* }** %20, align 8 + store double %15, double* %21, align 8 + store %Array* %17, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { { i64, %Callable* }*, double, %Array* }* %19, { { i64, %Callable* }*, double, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__20__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable* }* + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + store i64 %7, i64* %12, align 4 + store %Callable* %9, %Callable** %13, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %15 = load double, double* %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %17 = load %Array*, %Array** %16, 
align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { { i64, %Callable* }*, double, %Array* }* + %20 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 2 + store { i64, %Callable* }* %11, { i64, %Callable* }** %20, align 8 + store double %15, double* %21, align 8 + store %Array* %17, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { { i64, %Callable* }*, double, %Array* }* %19, { { i64, %Callable* }*, double, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %29) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void 
@Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____body({ i64, %Callable* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____adj({ i64, %Callable* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, double, %Array* }*, { { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctl(%Array* %3, { { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, double, %Array* }*, { { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctladj(%Array* %3, { { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @Lifted__PartialApplication__21__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, 
%Callable* }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 3 + %6 = load %Callable*, %Callable** %5, align 8 + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { i64, %Callable* }* + %9 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %8, i32 0, i32 1 + store i64 %4, i64* %9, align 4 + store %Callable* %6, %Callable** %10, align 8 + %11 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %11, i32 0, i32 0 + %13 = load double, double* %12, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %11, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { i64, { i64, %Callable* }*, double, %Array* }* + %18 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 3 + store i64 %2, i64* %18, align 4 + store { i64, %Callable* }* %8, { i64, %Callable* }** %19, align 8 + store double %13, double* %20, align 8 + store %Array* %15, %Array** %21, align 8 + %22 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %16, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__21__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 3 + %6 = load %Callable*, %Callable** %5, align 8 + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { i64, %Callable* }* + %9 = getelementptr inbounds { i64, %Callable* }, { i64, 
%Callable* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %8, i32 0, i32 1 + store i64 %4, i64* %9, align 4 + store %Callable* %6, %Callable** %10, align 8 + %11 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %11, i32 0, i32 0 + %13 = load double, double* %12, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %11, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { i64, { i64, %Callable* }*, double, %Array* }* + %18 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 3 + store i64 %2, i64* %18, align 4 + store { i64, %Callable* }* %8, { i64, %Callable* }** %19, align 8 + store double %13, double* %20, align 8 + store %Array* %15, %Array** %21, align 8 + %22 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %16, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__21__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 3 + %11 = load 
%Callable*, %Callable** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, %Callable* }* + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1 + store i64 %9, i64* %14, align 4 + store %Callable* %11, %Callable** %15, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %17 = load double, double* %16, align 8 + %18 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %19 = load %Array*, %Array** %18, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { i64, { i64, %Callable* }*, double, %Array* }* + %22 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 3 + store i64 %7, i64* %22, align 4 + store { i64, %Callable* }* %13, { i64, %Callable* }** %23, align 8 + store double %17, double* %24, align 8 + store %Array* %19, %Array** %25, align 8 + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* + %28 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %27, i32 0, i32 1 + store %Array* %3, %Array** %28, align 8 + store { i64, { i64, %Callable* }*, double, %Array* }* %21, { i64, { i64, %Callable* }*, double, %Array* }** %29, align 8 + %30 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 0 + %31 = load %Callable*, %Callable** %30, align 8 + %32 = call %Callable* @__quantum__rt__callable_copy(%Callable* %31, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %32) + call void @__quantum__rt__callable_invoke(%Callable* %32, %Tuple* %26, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %32, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__21__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 3 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, %Callable* }* + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1 + store i64 %9, i64* %14, align 4 + store %Callable* %11, %Callable** %15, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %17 = load double, double* %16, align 8 + %18 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %19 = load %Array*, %Array** %18, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { i64, { i64, %Callable* }*, double, %Array* }* + %22 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 3 + store i64 %7, i64* %22, align 4 + store { i64, %Callable* }* %13, { i64, %Callable* }** %23, align 8 + store double %17, double* %24, align 8 + store %Array* %19, %Array** %25, align 8 + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* + %28 = getelementptr inbounds { 
%Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %27, i32 0, i32 1 + store %Array* %3, %Array** %28, align 8 + store { i64, { i64, %Callable* }*, double, %Array* }* %21, { i64, { i64, %Callable* }*, double, %Array* }** %29, align 8 + %30 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 0 + %31 = load %Callable*, %Callable** %30, align 8 + %32 = call %Callable* @__quantum__rt__callable_copy(%Callable* %31, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %32) + call void @__quantum__rt__callable_make_controlled(%Callable* %32) + call void @__quantum__rt__callable_invoke(%Callable* %32, %Tuple* %26, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %32, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %7 = load double, double* %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____body(i64 %5, { i64, %Callable* }* %6, double %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, 
i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %7 = load double, double* %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____adj(i64 %5, { i64, %Callable* }* %6, double %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, { i64, %Callable* }*, double, %Array* }*, { i64, { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctl(%Array* %3, { i64, { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, { i64, %Callable* }*, double, %Array* }*, { i64, { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctladj(%Array* %3, { i64, { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__10__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 3 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__10__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast 
%Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 3 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +declare %String* @__quantum__rt__int_to_string(i64) + +define internal %Callable* @Microsoft__Quantum__Canon___55c7b8d161af40c49ac844f8a0630208_BoundCA__body(%Array* %operations) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %9 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %15, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %10) + %13 = bitcast i8* %12 to %Callable** + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %15 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %operations, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Callable*, %Array* }* + %18 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 1 + store %Callable* %8, %Callable** %18, align 8 + store %Array* %operations, %Array** %19, align 8 + %20 = call %Callable* 
@__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__22__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__11__FunctionTable, %Tuple* %16) + %21 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %22 = phi i64 [ 0, %exit__2 ], [ %27, %exiting__3 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %22) + %25 = bitcast i8* %24 to %Callable** + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %27 = add i64 %22, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + ret %Callable* %20 +} + +define internal void @Lifted__PartialApplication__22__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__22__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* 
@__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__22__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__22__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + 
%3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____body(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void 
@Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____adj(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____ctl(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____ctladj(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__11__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Callable** + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__11__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { 
%Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Callable** + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %11, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation____QsRef0___AddGeneratorSystems____body(i64 %idxTerm, i64 %nTermsA, i64 %nTermsB, %Callable* %generatorIndexFunctionA, %Callable* %generatorIndexFunctionB) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 1) + %0 = icmp slt i64 %idxTerm, %nTermsA + br i1 %0, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { i64 }* + %3 = getelementptr inbounds { i64 }, { i64 }* %2, i32 0, i32 0 + store i64 %idxTerm, i64* %3, align 4 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %generatorIndexFunctionA, %Tuple* %1, %Tuple* %4) + %5 = bitcast %Tuple* %4 to { { { %Array*, %Array* }*, %Array* }* }* + %6 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 0 + %7 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %6, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %1, i32 -1) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %7 + +else__1: ; preds = %entry + %8 = sub i64 %idxTerm, %nTermsA + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { i64 }* + %11 = getelementptr inbounds { i64 }, { i64 }* %10, i32 0, i32 0 + store i64 %8, i64* %11, align 4 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %generatorIndexFunctionB, %Tuple* %9, %Tuple* %12) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %15 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %14, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %15 + +continue__1: ; No predecessors! + unreachable +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____body(double %trotterStepSize, i64 %trotterOrder, double %maxTime, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %1 = load { %Callable* }*, { %Callable* }** %0, align 8 + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + %3 = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 1) + %4 = bitcast { %Callable* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %5, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { i64, %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %11 = fdiv double %maxTime, %trotterStepSize + %nTimeSlices = call i64 
@Microsoft__Quantum__Math__Ceiling__body(double %11) + %12 = sitofp i64 %nTimeSlices to double + %resizedTrotterStepSize = fdiv double %maxTime, %12 + %13 = sub i64 %nTimeSlices, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxTimeSlice = phi i64 [ 0, %entry ], [ %19, %exiting__1 ] + %14 = icmp sle i64 %idxTimeSlice, %13 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %resizedTrotterStepSize) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array* }* + %18 = getelementptr inbounds { %Array* }, { %Array* }* %17, i32 0, i32 0 + store %Array* %qubits, %Array** %18, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %16, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %idxTimeSlice, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal i64 @Microsoft__Quantum__Math__Ceiling__body(double %value) { +entry: + %0 = call { i64, double, i1 }* @Microsoft__Quantum__Math____QsRef1__ExtendedTruncation____body(double %value) + %1 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %0, i32 0, i32 0 + %truncated = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %0, i32 0, i32 1 + %remainder = load double, double* %2, align 8 + %3 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %0, i32 0, i32 2 + %isPositive = load i1, i1* %3, align 1 + %4 = call double @Microsoft__Quantum__Math__AbsD__body(double %remainder) + %5 = fcmp ole double %4, 1.000000e-15 + br i1 %5, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %6 = bitcast { i64, double, i1 }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret i64 %truncated + +else__1: ; preds = %entry + br i1 %isPositive, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %7 = add i64 %truncated, 1 + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %8 = phi i64 [ %7, %condTrue__1 ], [ %truncated, %condFalse__1 ] + %9 = bitcast { i64, double, i1 }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret i64 %8 + +continue__1: ; No predecessors! 
+ unreachable +} + +define internal %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %trotterStepSize) { +entry: + %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %evolutionSet = load { %Callable* }*, { %Callable* }** %0, align 8 + %1 = getelementptr inbounds { %Callable* }, { %Callable* }* %evolutionSet, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + %3 = bitcast { %Callable* }* %evolutionSet to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %generatorSystem = load { i64, %Callable* }*, { i64, %Callable* }** %4, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %generatorSystemFunction = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + %6 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %8, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* getelementptr 
({ %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %12 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %11, i32 0, i32 1 + store %Callable* %9, %Callable** %12, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, { { %Callable* }*, { i64, %Callable* }* }** %13, align 8 + %14 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__25__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__14__FunctionTable, %Tuple* %10) + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %trotterForm = bitcast %Tuple* %15 to { i64, %Callable* }* + %16 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %trotterForm, i32 0, i32 0 + %17 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %trotterForm, i32 0, i32 1 + store i64 %nTerms, i64* %16, align 4 + store %Callable* %14, %Callable** %17, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %14, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %18 = call %Callable* @Microsoft__Quantum__Canon___d9c24574d9ed4a4aba478cabe8323707_DecomposedIntoTimeStepsCA__body({ i64, %Callable* }* %trotterForm, i64 %trotterOrder) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { %Callable*, double }* + %21 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %20, i32 0, i32 1 + store %Callable* %18, %Callable** %21, align 8 + store double %trotterStepSize, double* %22, align 8 + %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__26__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__15__FunctionTable, %Tuple* %19) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + ret %Callable* %23 +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____adj(double %trotterStepSize, i64 %trotterOrder, double %maxTime, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %1 = load { %Callable* }*, { %Callable* }** %0, align 8 + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + %3 = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 1) + %4 = bitcast { %Callable* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %5, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { i64, %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %11 = fdiv double %maxTime, %trotterStepSize + %__qsVar0__nTimeSlices__ = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %11) + %12 = sitofp i64 %__qsVar0__nTimeSlices__ to double + %__qsVar1__resizedTrotterStepSize__ = fdiv double %maxTime, %12 + %13 = sub i64 %__qsVar0__nTimeSlices__, 1 + %14 = sub i64 %13, 0 + %15 = sdiv i64 %14, 1 + %16 = mul i64 1, %15 + %17 = add i64 0, %16 + %18 = insertvalue %Range zeroinitializer, i64 %17, 0 + %19 = insertvalue %Range %18, i64 -1, 1 + %20 = insertvalue %Range %19, i64 0, 2 + %21 = extractvalue %Range %20, 0 + %22 = extractvalue %Range %20, 1 + %23 = extractvalue %Range %20, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %24 = icmp sgt i64 %22, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar2__idxTimeSlice__ = phi i64 [ %21, %preheader__1 ], [ %33, %exiting__1 ] + %25 = icmp sle i64 %__qsVar2__idxTimeSlice__, %23 + %26 = icmp sge i64 
%__qsVar2__idxTimeSlice__, %23 + %27 = select i1 %24, i1 %25, i1 %26 + br i1 %27, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %28 = call %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %__qsVar1__resizedTrotterStepSize__) + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %29) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %31 = bitcast %Tuple* %30 to { %Array* }* + %32 = getelementptr inbounds { %Array* }, { %Array* }* %31, i32 0, i32 0 + store %Array* %qubits, %Array** %32, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %30, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %28, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %33 = add i64 %__qsVar2__idxTimeSlice__, %22 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____ctl(%Array* %__controlQubits__, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 0 + %trotterStepSize = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 1 + %trotterOrder = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 2 + %maxTime = load double, double* %3, align 8 + %4 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 3 + %evolutionGenerator = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** 
%4, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 1) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 4 + %qubits = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %17 = fdiv double %maxTime, %trotterStepSize + %nTimeSlices = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %17) + %18 = sitofp i64 %nTimeSlices to double + %resizedTrotterStepSize = fdiv double %maxTime, %18 + %19 = sub i64 %nTimeSlices, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxTimeSlice = phi i64 [ 0, %entry ], [ %27, %exiting__1 ] + %20 = icmp sle i64 %idxTimeSlice, %19 + br i1 %20, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %21 = call %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %resizedTrotterStepSize) + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %21, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, %Array* }* + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %25, align 8 + store %Array* %qubits, %Array** %26, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %23, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %27 = add i64 %idxTimeSlice, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____ctladj(%Array* %__controlQubits__, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 0 + %trotterStepSize = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 1 + %trotterOrder = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 2 + %maxTime = load double, double* %3, align 8 + %4 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 3 + %evolutionGenerator = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %4, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { 
i64, %Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 1) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 4 + %qubits = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %17 = fdiv double %maxTime, %trotterStepSize + %__qsVar0__nTimeSlices__ = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %17) + %18 = sitofp i64 %__qsVar0__nTimeSlices__ to double + %__qsVar1__resizedTrotterStepSize__ = fdiv double %maxTime, %18 + %19 = sub i64 %__qsVar0__nTimeSlices__, 1 + %20 = sub i64 %19, 0 + %21 = sdiv i64 %20, 1 + %22 = mul i64 1, %21 + %23 = add i64 0, %22 + %24 = insertvalue %Range zeroinitializer, i64 %23, 0 + %25 = insertvalue %Range %24, i64 -1, 1 + %26 = insertvalue %Range %25, i64 0, 2 + %27 = extractvalue %Range %26, 0 + %28 = extractvalue %Range %26, 1 + %29 = extractvalue %Range %26, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %30 = icmp sgt i64 %28, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar2__idxTimeSlice__ = phi i64 [ %27, %preheader__1 ], [ %40, %exiting__1 ] + %31 = icmp sle i64 %__qsVar2__idxTimeSlice__, %29 + %32 = icmp sge i64 %__qsVar2__idxTimeSlice__, %29 + %33 = select i1 %30, i1 %31, i1 %32 + br i1 %33, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %34 = call %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %__qsVar1__resizedTrotterStepSize__) + %35 = call %Callable* @__quantum__rt__callable_copy(%Callable* %34, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %35, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %35) + call void @__quantum__rt__callable_make_controlled(%Callable* %35) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { %Array*, %Array* }* + %38 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %37, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %38, align 8 + store %Array* %qubits, %Array** %39, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %35, %Tuple* %36, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* 
%35, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %40 = add i64 %__qsVar2__idxTimeSlice__, %28 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %idx, double %stepsize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %evolutionSet = load { %Callable* }*, { %Callable* }** %0, align 8 + %1 = getelementptr inbounds { %Callable* }, { %Callable* }* %evolutionSet, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + %3 = bitcast { %Callable* }* %evolutionSet to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %generatorSystem = load { i64, %Callable* }*, { i64, %Callable* }** %4, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %generatorSystemFunction = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + %6 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + 
%8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %8, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { i64 }* + %11 = getelementptr inbounds { i64 }, { i64 }* %10, i32 0, i32 0 + store i64 %idx, i64* %11, align 4 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %generatorSystemFunction, %Tuple* %9, %Tuple* %12) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %14, align 8 + %15 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %16 = load { %Array*, %Array* }*, { %Array*, %Array* }** %15, align 8 + %17 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %16, i32 0, i32 0 + %18 = load %Array*, %Array** %17, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %16, i32 0, i32 1 + %20 = load %Array*, %Array** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 1) + %21 = bitcast { %Array*, %Array* }* %16 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 1) + %22 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %24 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 1) + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }* }* getelementptr ({ { %Callable* }* }, { { %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %2, %Tuple* %24, %Tuple* %25) + %26 = bitcast %Tuple* %25 to { { %Callable* }* }* + %27 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %26, i32 0, i32 0 + %28 = load { %Callable* }*, { %Callable* }** %27, align 8 + %29 = getelementptr inbounds { %Callable* }, { %Callable* }* %28, i32 0, i32 0 + %30 = load %Callable*, %Callable** %29, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %31 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %32 = bitcast %Tuple* %31 to { double, %Array* }* + %33 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %32, i32 0, i32 0 + %34 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %32, i32 0, i32 1 + store double %stepsize, double* %33, align 8 + store %Array* %qubits, %Array** %34, 
align 8 + call void @__quantum__rt__callable_invoke(%Callable* %30, %Tuple* %31, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 -1) + %35 = bitcast { %Callable* }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____adj({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %idx, double %stepsize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %__qsVar0__evolutionSet__ = load { %Callable* }*, { %Callable* }** %0, align 8 + %1 = getelementptr inbounds { %Callable* }, { %Callable* }* %__qsVar0__evolutionSet__, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + %3 = bitcast { %Callable* }* %__qsVar0__evolutionSet__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %__qsVar1__generatorSystem__ = load { i64, %Callable* }*, { i64, %Callable* }** %4, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__qsVar1__generatorSystem__, i32 0, i32 1 + %__qsVar3__generatorSystemFunction__ = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + %6 = bitcast { i64, %Callable* }* %__qsVar1__generatorSystem__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__qsVar1__generatorSystem__, i32 0, i32 0 + %__qsVar2__nTerms__ = load i64, i64* %8, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { i64 }* + %11 = getelementptr inbounds { i64 }, { i64 }* %10, i32 0, i32 0 + store i64 %idx, i64* %11, align 4 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %__qsVar3__generatorSystemFunction__, %Tuple* %9, %Tuple* %12) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %__qsVar4__generatorIndex__ = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %14, align 8 + %15 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__, i32 0, i32 0 + %16 = load { %Array*, %Array* }*, { %Array*, %Array* }** %15, align 8 + %17 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %16, i32 0, i32 0 + %18 = load %Array*, %Array** %17, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %16, i32 0, i32 1 + %20 = load %Array*, %Array** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 1) + %21 = bitcast { %Array*, %Array* }* %16 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 1) + %22 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__, i32 0, i32 1 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %24 = bitcast { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 1) + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }* }* getelementptr ({ { %Callable* }* }, { { %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %2, %Tuple* %24, %Tuple* %25) + %26 = bitcast %Tuple* %25 to { { %Callable* }* }* + %27 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %26, i32 0, i32 0 + %28 = load { %Callable* }*, { %Callable* }** %27, align 8 + %29 = getelementptr inbounds { %Callable* }, { %Callable* }* %28, i32 0, i32 0 + %30 = load %Callable*, %Callable** %29, align 8 + %31 = call %Callable* @__quantum__rt__callable_copy(%Callable* %30, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %31) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { double, %Array* }* + %34 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %33, i32 0, i32 1 + store double %stepsize, double* %34, align 8 + store %Array* %qubits, %Array** %35, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %31, %Tuple* %32, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 -1) + %36 = bitcast { %Callable* }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____ctl(%Array* %__controlQubits__, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 0 + %evolutionGenerator = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %2 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %evolutionSet = load { %Callable* }*, { %Callable* }** %2, align 8 + %3 = getelementptr inbounds { %Callable* }, { %Callable* }* %evolutionSet, i32 0, i32 0 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + %5 = bitcast { %Callable* }* %evolutionSet to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %generatorSystem = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %generatorSystemFunction = load %Callable*, %Callable** %7, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + %8 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 1 + %idx = load i64, i64* %10, align 4 + %11 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 2 + %stepsize = load double, double* %11, align 8 + %12 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 3 + %qubits = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %13, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64 }* + %16 = getelementptr inbounds { i64 }, { i64 }* %15, i32 0, i32 0 + store i64 %idx, i64* %16, align 4 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %generatorSystemFunction, %Tuple* %14, %Tuple* %17) + %18 = bitcast %Tuple* %17 to { { { %Array*, %Array* }*, %Array* }* }* + %19 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %18, i32 0, i32 0 + %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %19, align 8 + %20 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %21 = load { %Array*, %Array* }*, { %Array*, %Array* }** %20, align 8 + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 0 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %24 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 1 + %25 = load %Array*, %Array** 
%24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 1) + %26 = bitcast { %Array*, %Array* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 1) + %27 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }* }* getelementptr ({ { %Callable* }* }, { { %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %29, %Tuple* %30) + %31 = bitcast %Tuple* %30 to { { %Callable* }* }* + %32 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %31, i32 0, i32 0 + %33 = load { %Callable* }*, { %Callable* }** %32, align 8 + %34 = getelementptr inbounds { %Callable* }, { %Callable* }* %33, i32 0, i32 0 + %35 = load %Callable*, %Callable** %34, align 8 + %36 = call %Callable* @__quantum__rt__callable_copy(%Callable* %35, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %36) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %37 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %38 = bitcast %Tuple* %37 to { double, %Array* }* + %39 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %38, i32 0, i32 0 + %40 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %38, i32 0, i32 1 + store double %stepsize, double* %39, align 8 + store %Array* %qubits, %Array** %40, align 8 + %41 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %42 = bitcast %Tuple* %41 to { %Array*, { double, %Array* }* }* + %43 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %42, i32 0, i32 0 + %44 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %42, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %43, align 8 + store { double, %Array* }* %38, { double, %Array* }** %44, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %36, %Tuple* %41, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, 
i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %35, i32 -1) + %45 = bitcast { %Callable* }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____ctladj(%Array* %__controlQubits__, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 0 + %evolutionGenerator = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %2 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %__qsVar0__evolutionSet__ = load { %Callable* }*, { %Callable* }** %2, align 8 + %3 = getelementptr inbounds { %Callable* }, { %Callable* }* %__qsVar0__evolutionSet__, i32 0, i32 0 + %4 = load %Callable*, %Callable** %3, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + %5 = bitcast { %Callable* }* %__qsVar0__evolutionSet__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %__qsVar1__generatorSystem__ = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__qsVar1__generatorSystem__, i32 0, i32 1 + %__qsVar3__generatorSystemFunction__ = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + %8 = bitcast { i64, %Callable* }* %__qsVar1__generatorSystem__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 1 + %idx = load i64, i64* %10, align 4 + %11 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 2 + %stepsize = load double, double* %11, align 8 + %12 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 3 + %qubits = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__qsVar1__generatorSystem__, i32 0, i32 0 + %__qsVar2__nTerms__ = load i64, i64* %13, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64 }* + %16 = getelementptr inbounds { i64 }, { i64 }* %15, i32 0, i32 0 + store i64 %idx, i64* %16, align 4 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %__qsVar3__generatorSystemFunction__, %Tuple* %14, %Tuple* 
%17) + %18 = bitcast %Tuple* %17 to { { { %Array*, %Array* }*, %Array* }* }* + %19 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %18, i32 0, i32 0 + %__qsVar4__generatorIndex__ = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %19, align 8 + %20 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__, i32 0, i32 0 + %21 = load { %Array*, %Array* }*, { %Array*, %Array* }** %20, align 8 + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 0 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %24 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 1 + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 1) + %26 = bitcast { %Array*, %Array* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 1) + %27 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }* }* getelementptr ({ { %Callable* }* }, { { %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %29, %Tuple* %30) + %31 = bitcast %Tuple* %30 to { { %Callable* }* }* + %32 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %31, i32 0, i32 0 + %33 = load { %Callable* }*, { %Callable* }** %32, align 8 + %34 = getelementptr inbounds { %Callable* }, { %Callable* }* %33, i32 0, i32 0 + %35 = load %Callable*, %Callable** %34, align 8 + %36 = call %Callable* @__quantum__rt__callable_copy(%Callable* %35, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %36) + call void @__quantum__rt__callable_make_controlled(%Callable* %36) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %37 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %38 = bitcast %Tuple* %37 to { double, %Array* }* + %39 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %38, i32 0, i32 0 + %40 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %38, i32 0, i32 1 + store double %stepsize, double* %39, align 8 + store %Array* %qubits, %Array** %40, align 8 + %41 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %42 = bitcast %Tuple* %41 to { %Array*, { double, %Array* }* }* + %43 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %42, i32 0, i32 0 + %44 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* 
}* %42, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %43, align 8 + store { double, %Array* }* %38, { double, %Array* }** %44, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %36, %Tuple* %41, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %35, i32 -1) + %45 = bitcast { %Callable* }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 -1) + ret void +} 
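+; AddGeneratorSystems__body (below) combines two generator systems: the
+; resulting system has nTermsA + nTermsB terms, and its generator index
+; function is a partial application capturing the index functions of both
+; input systems.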
+ +define internal { i64, %Callable* }* @Microsoft__Quantum__Simulation__AddGeneratorSystems__body({ i64, %Callable* }* %generatorSystemA, { i64, %Callable* }* %generatorSystemB) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystemA, i32 0, i32 1 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { i64, %Callable* }* %generatorSystemA to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystemB, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + %5 = bitcast { i64, %Callable* }* %generatorSystemB to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %nTermsA = call i64 @Microsoft__Quantum__Simulation__GetGeneratorSystemNTerms__body({ i64, %Callable* }* %generatorSystemA) + %nTermsB = call i64 @Microsoft__Quantum__Simulation__GetGeneratorSystemNTerms__body({ i64, %Callable* }* %generatorSystemB) + %generatorIndexFunctionA = call %Callable* @Microsoft__Quantum__Simulation__GetGeneratorSystemFunction__body({ i64, %Callable* }* %generatorSystemA) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 1) + %generatorIndexFunctionB = call %Callable* @Microsoft__Quantum__Simulation__GetGeneratorSystemFunction__body({ i64, %Callable* }* %generatorSystemB) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 1) + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation____QsRef0___AddGeneratorSystems____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunctionB, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunctionB, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, i64, %Callable*, %Callable* }* getelementptr ({ %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Callable*, i64, i64, %Callable*, %Callable* }* + %9 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %8, i32 0, i32 2 + %12 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, 
%Callable*, %Callable* }* %8, i32 0, i32 3 + %13 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %8, i32 0, i32 4 + store %Callable* %6, %Callable** %9, align 8 + store i64 %nTermsA, i64* %10, align 4 + store i64 %nTermsB, i64* %11, align 4 + store %Callable* %generatorIndexFunctionA, %Callable** %12, align 8 + store %Callable* %generatorIndexFunctionB, %Callable** %13, align 8 + %generatorIndexFunction = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__23__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__12__FunctionTable, %Tuple* %7) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + %14 = add i64 %nTermsA, %nTermsB + %15 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 %14, %Callable* %generatorIndexFunction) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunction, i32 -1) + ret { i64, %Callable* }* %15 +} + +define internal i64 @Microsoft__Quantum__Simulation__GetGeneratorSystemNTerms__body({ i64, %Callable* }* %generatorSystem) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %generatorIndexFunction = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + %1 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %2, 
align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + ret i64 %nTerms +} + +define internal %Callable* @Microsoft__Quantum__Simulation__GetGeneratorSystemFunction__body({ i64, %Callable* }* %generatorSystem) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %generatorIndexFunction = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + %1 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %2, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + ret %Callable* %generatorIndexFunction +} + +define internal void @Lifted__PartialApplication__23__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64 }* + %1 = getelementptr inbounds { i64 }, { i64 }* %0, i32 0, i32 0 + %2 = load i64, i64* %1, align 4 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable*, %Callable* }* + %4 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 1 + %5 = load i64, i64* %4, align 4 + %6 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 2 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 3 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 4 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = 
call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64, i64, %Callable*, %Callable* }* getelementptr ({ i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, i64, i64, %Callable*, %Callable* }* + %14 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 4 + store i64 %2, i64* %14, align 4 + store i64 %5, i64* %15, align 4 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Callable* %11, %Callable** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0___AddGeneratorSystems____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, i64, i64, %Callable*, %Callable* }* + %1 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 4 + %6 = load i64, i64* %1, align 4 + %7 = load i64, i64* %2, align 4 + %8 = load i64, i64* %3, align 4 + %9 = load %Callable*, %Callable** %4, align 8 + %10 = load %Callable*, %Callable** %5, align 8 + %11 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation____QsRef0___AddGeneratorSystems____body(i64 %6, i64 %7, i64 %8, %Callable* %9, %Callable* %10) + %12 = bitcast %Tuple* %result-tuple to { { { %Array*, %Array* }*, %Array* }* }* + %13 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %12, i32 0, i32 0 + store { { %Array*, %Array* }*, %Array* }* %11, { { %Array*, %Array* }*, %Array* }** %13, align 8 + ret void +} + +define internal void @MemoryManagement__12__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, 
%Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 3 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 4 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__12__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 3 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 4 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 %__Item1__, %Callable* %__Item2__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item2__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item2__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { i64, %Callable* }* + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %1, i32 0, i32 1 + store i64 %__Item1__, i64* %2, align 4 + store %Callable* %__Item2__, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item2__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item2__, i32 1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %__Item2__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item2__, i32 -1) + ret { i64, %Callable* }* %1 +} + +define internal { { %Callable* }*, { i64, %Callable* }* }* @Microsoft__Quantum__Simulation__EvolutionGenerator__body({ %Callable* }* %__Item1__, { i64, %Callable* }* %__Item2__) { +entry: + %0 = getelementptr inbounds { %Callable* }, { %Callable* }* %__Item1__, i32 0, i32 0 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { %Callable* }* %__Item1__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__Item2__, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + %5 = bitcast { i64, %Callable* }* %__Item2__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }*, { i64, %Callable* }* }* getelementptr ({ { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { %Callable* }*, { i64, %Callable* }* }* + %8 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %7, i32 0, i32 1 + store { %Callable* }* %__Item1__, { %Callable* }** %8, align 8 + store { i64, %Callable* }* %__Item2__, { i64, %Callable* }** %9, align 8 + %10 = getelementptr inbounds { %Callable* }, { %Callable* }* %__Item1__, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__Item2__, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 1) + %14 = bitcast { %Callable* }* %__Item1__ to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 1) + %15 = bitcast { i64, %Callable* }* %__Item2__ to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + ret { { %Callable* }*, { i64, %Callable* }* }* %7 +} + +define internal { %Callable* }* @Microsoft__Quantum__Simulation__EvolutionSet__body(%Callable* %__Item1__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable* }* + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + store %Callable* %__Item1__, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 -1) + ret { %Callable* }* %1 +} + +define internal { %Callable* }* @Microsoft__Quantum__Simulation__EvolutionUnitary__body(%Callable* %__Item1__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable* }* + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + store %Callable* %__Item1__, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 -1) + ret { %Callable* }* %1 +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %0, %Array* %__Item3__) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__Item3__, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, %Array* }*, %Array* }* getelementptr ({ { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { { %Array*, %Array* }*, %Array* }* + %3 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %2, i32 0, i32 1 + store { %Array*, %Array* }* %0, { %Array*, %Array* }** %3, align 8 + store %Array* %__Item3__, %Array** %4, align 8 + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 1) + %9 = bitcast { %Array*, %Array* }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__Item3__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__Item3__, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %2 +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__body(i64 %idxTerm) { 
+entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to i64* + store i64 0, i64* %2, align 4 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to double* + store double 0.000000e+00, double* %5, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %0, %Array** %8, align 8 + store %Array* %3, %Array** %9, align 8 + %10 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 0) + %12 = bitcast i8* %11 to i64* + store i64 0, i64* %12, align 4 + %13 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %7, %Array* %10) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %13 +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Simulation__IdentityGeneratorSystem__body() { +entry: + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 0, %Callable* %0) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + ret { i64, %Callable* }* %1 +} + +define internal void @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64 }* + %1 = getelementptr inbounds { i64 }, { i64 }* %0, i32 0, i32 0 + %2 = load i64, i64* %1, align 4 + %3 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__body(i64 %2) + %4 = bitcast %Tuple* %result-tuple to { { { %Array*, %Array* }*, %Array* }* }* + %5 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %4, i32 0, i32 0 + store { { %Array*, %Array* }*, %Array* }* %3, { { %Array*, %Array* }*, %Array* }** %5, align 8 + ret void +} + +define internal { %Callable* }* @Microsoft__Quantum__Simulation__SimulationAlgorithm__body(%Callable* %__Item1__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable* }* + %2 = getelementptr inbounds { 
%Callable* }, { %Callable* }* %1, i32 0, i32 0 + store %Callable* %__Item1__, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 -1) + ret { %Callable* }* %1 +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Simulation__SumGeneratorSystems__body(%Array* %generatorSystems) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %generatorSystems) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %generatorSystems, i64 %2) + %5 = bitcast i8* %4 to { i64, %Callable* }** + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %5, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { i64, %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %generatorSystems, i32 1) + %11 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation__AddGeneratorSystems__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %12 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__IdentityGeneratorSystem__body() + %13 = call { i64, %Callable* }* @Microsoft__Quantum__Arrays___2d898dd22e254b94929370686c0145ed_Fold__body(%Callable* %11, { i64, %Callable* }* %12, %Array* %generatorSystems) + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 1 + %15 = load %Callable*, %Callable** %14, align 8 + %16 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %17 = phi i64 [ 0, %exit__1 ], [ %25, %exiting__2 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %generatorSystems, i64 %17) + %20 = bitcast i8* %19 to { i64, %Callable* }** + %21 = load { i64, %Callable* }*, { i64, %Callable* }** %20, align 8 + %22 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %21, i32 0, i32 1 + %23 = load %Callable*, %Callable** %22, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %23, i32 -1) + %24 = bitcast { i64, %Callable* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %25 = add i64 %17, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %generatorSystems, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + %26 = bitcast { i64, %Callable* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + ret { i64, %Callable* }* %13 +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Arrays___2d898dd22e254b94929370686c0145ed_Fold__body(%Callable* %folder, { i64, %Callable* }* %state, %Array* %array) { +entry: + %current = alloca { i64, %Callable* }*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %folder, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %folder, i32 1) + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %state, i32 0, i32 1 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { i64, %Callable* }* %state to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %5) + %8 = bitcast i8* %7 to { i64, %Callable* }** + %9 = load { i64, %Callable* }*, { i64, %Callable* }** %8, align 8 + %10 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %9, i32 0, i32 1 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %11, i32 1) + %12 = bitcast { i64, %Callable* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + store { i64, %Callable* }* %state, { i64, %Callable* }** %current, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %14 = call %Range @Microsoft__Quantum__Arrays___ab9454a18cf34e7dab26076c15ee491d_IndexRange__body(%Array* %array) + %15 = extractvalue %Range %14, 0 + %16 = extractvalue %Range %14, 1 + %17 = extractvalue %Range %14, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %18 = icmp sgt i64 %16, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idxElement = phi i64 [ %15, %preheader__1 ], [ %43, %exiting__2 ] + %19 = icmp sle i64 %idxElement, %17 + %20 = icmp sge i64 %idxElement, %17 + %21 = select i1 %18, i1 %19, i1 %20 
+ br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = load { i64, %Callable* }*, { i64, %Callable* }** %current, align 8 + %23 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %22, i32 0, i32 1 + %24 = load %Callable*, %Callable** %23, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 1) + %25 = bitcast { i64, %Callable* }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 1) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idxElement) + %27 = bitcast i8* %26 to { i64, %Callable* }** + %28 = load { i64, %Callable* }*, { i64, %Callable* }** %27, align 8 + %29 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %28, i32 0, i32 1 + %30 = load %Callable*, %Callable** %29, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 1) + %31 = bitcast { i64, %Callable* }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { i64, %Callable* }* }* getelementptr ({ { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, %Callable* }*, { i64, %Callable* }* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { { i64, %Callable* }*, { i64, %Callable* }* }* + %34 = getelementptr inbounds { { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, %Callable* }*, { i64, %Callable* }* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, %Callable* }*, { i64, %Callable* }* }* %33, i32 0, i32 1 + store { i64, %Callable* }* %22, { i64, %Callable* }** %34, align 8 + store { i64, %Callable* }* %28, { i64, %Callable* }** %35, align 8 + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }* }* getelementptr ({ { i64, %Callable* }* }, { { i64, %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %folder, %Tuple* %32, %Tuple* %36) + %37 = bitcast %Tuple* %36 to { { i64, %Callable* }* }* + %38 = getelementptr inbounds { { i64, %Callable* }* }, { { i64, %Callable* }* }* %37, i32 0, i32 0 + %39 = load { i64, %Callable* }*, { i64, %Callable* }** %38, align 8 + %40 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %39, i32 0, i32 1 + %41 = load %Callable*, %Callable** %40, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %41, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %41, i32 1) + %42 = bitcast { i64, %Callable* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %41, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %41, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %24, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + store { i64, %Callable* }* %39, { i64, %Callable* }** %current, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %41, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %41, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %43 = add i64 %idxElement, %16 + br label %header__2 + +exit__2: ; preds = %header__2 + %44 = load { i64, %Callable* }*, { i64, %Callable* }** %current, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %folder, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %folder, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + %45 = sub i64 %3, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %46 = phi i64 [ 0, %exit__2 ], [ %54, %exiting__3 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %46) + %49 = bitcast i8* %48 to { i64, %Callable* }** + %50 = load { i64, %Callable* }*, { i64, %Callable* }** %49, align 8 + %51 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %50, i32 0, i32 1 + %52 = load %Callable*, %Callable** %51, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %52, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %52, i32 -1) + %53 = bitcast { i64, %Callable* }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %53, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %54 = add i64 %46, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + %55 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %44, i32 0, i32 1 + %56 = load %Callable*, %Callable** %55, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %56, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %56, i32 -1) + %57 = bitcast { i64, %Callable* }* %44 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + ret { i64, %Callable* }* %44 +} + +define internal void @Microsoft__Quantum__Simulation__AddGeneratorSystems__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, 
%Callable* }*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, %Callable* }*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %3 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %5 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__AddGeneratorSystems__body({ i64, %Callable* }* %3, { i64, %Callable* }* %4) + %6 = bitcast %Tuple* %result-tuple to { { i64, %Callable* }* }* + %7 = getelementptr inbounds { { i64, %Callable* }* }, { { i64, %Callable* }* }* %6, i32 0, i32 0 + store { i64, %Callable* }* %5, { i64, %Callable* }** %7, align 8 + ret void +} + +define internal { %Callable* }* @Microsoft__Quantum__Simulation__TrotterSimulationAlgorithm__body(double %trotterStepSize, i64 %trotterOrder) { +entry: + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, i64 }* getelementptr ({ %Callable*, double, i64 }, { %Callable*, double, i64 }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, double, i64 }* + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store double %trotterStepSize, double* %4, align 8 + store i64 %trotterOrder, i64* %5, align 4 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__24__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__13__FunctionTable, %Tuple* %1) + %7 = call { %Callable* }* @Microsoft__Quantum__Simulation__SimulationAlgorithm__body(%Callable* %6) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + ret { %Callable* }* %7 +} + +define internal void @Lifted__PartialApplication__24__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = bitcast %Tuple* %arg-tuple to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %6 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 0 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 1 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %8, align 8 + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { 
%Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 2 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %14 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store i64 %4, i64* %15, align 4 + store double %7, double* %16, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %9, { { %Callable* }*, { i64, %Callable* }* }** %17, align 8 + store %Array* %11, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__24__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = bitcast %Tuple* %arg-tuple to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %6 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 0 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 1 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %8, align 8 + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 2 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %14 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store i64 %4, i64* %15, align 4 + store double %7, double* %16, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %9, { { %Callable* }*, { i64, %Callable* }* }** %17, align 8 + store %Array* %11, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__24__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %6 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = 
getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 1 + %13 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %12, align 8 + %14 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 2 + %15 = load %Array*, %Array** %14, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %18 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 3 + %22 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 4 + store double %7, double* %18, align 8 + store i64 %9, i64* %19, align 4 + store double %11, double* %20, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %13, { { %Callable* }*, { i64, %Callable* }* }** %21, align 8 + store %Array* %15, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* getelementptr ({ %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* 
%24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__24__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %6 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 1 + %13 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %12, align 8 + %14 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 2 + %15 = load %Array*, %Array** %14, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %18 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, 
%Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 3 + %22 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 4 + store double %7, double* %18, align 8 + store i64 %9, i64* %19, align 4 + store double %11, double* %20, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %13, { { %Callable* }*, { i64, %Callable* }* }** %21, align 8 + store %Array* %15, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* getelementptr ({ %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %29) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64, double, { { 
%Callable* }*, { i64, %Callable* }* }*, %Array* }* + %1 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load i64, i64* %2, align 4 + %8 = load double, double* %3, align 8 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %4, align 8 + %10 = load %Array*, %Array** %5, align 8 + call void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____body(double %6, i64 %7, double %8, { { %Callable* }*, { i64, %Callable* }* }* %9, %Array* %10) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %1 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load i64, i64* %2, align 4 + %8 = load double, double* %3, align 8 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %4, align 8 + %10 = load %Array*, %Array** %5, align 8 + call void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____adj(double %6, i64 %7, double %8, { { %Callable* }*, { i64, %Callable* }* }* %9, %Array* %10) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { 
%Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____ctl(%Array* %3, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____ctladj(%Array* %3, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__13__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__13__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__25__body__wrapper(%Tuple* %capture-tuple, %Tuple* 
%arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %2 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { i64, double, %Array* }* + %4 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 0 + %5 = load i64, i64* %4, align 4 + %6 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* getelementptr ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %12 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 3 + store { { %Callable* }*, { i64, %Callable* }* }* %2, { { %Callable* }*, { i64, %Callable* }* }** %12, align 8 + store i64 %5, i64* %13, align 4 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__25__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %2 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { i64, double, %Array* }* + %4 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 0 + %5 = load i64, i64* %4, align 4 + %6 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 1 + %7 = load double, double* 
%6, align 8 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* getelementptr ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %12 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 3 + store { { %Callable* }*, { i64, %Callable* }* }* %2, { { %Callable* }*, { i64, %Callable* }* }** %12, align 8 + store i64 %5, i64* %13, align 4 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__25__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, double, %Array* }*, { i64, double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1 + %7 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %6, align 8 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 0 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, 
i32 1 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 2 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* getelementptr ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %16 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 3 + store { { %Callable* }*, { i64, %Callable* }* }* %7, { { %Callable* }*, { i64, %Callable* }* }** %16, align 8 + store i64 %9, i64* %17, align 4 + store double %11, double* %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* getelementptr ({ %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__25__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, double, %Array* }*, { i64, double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1 + %7 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %6, align 8 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 0 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 1 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 2 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* getelementptr ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %16 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 3 + store { { %Callable* }*, { i64, %Callable* }* }* %7, { { %Callable* }*, { i64, %Callable* }* }** %16, align 8 + store i64 %9, i64* %17, align 4 + store double %11, double* %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* getelementptr ({ %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* 
%21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %1 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 3 + %5 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %6 = load i64, i64* %2, align 4 + %7 = load double, double* %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____body({ { %Callable* }*, { i64, %Callable* }* }* %5, i64 %6, double %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %1 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* 
}*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 3 + %5 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %6 = load i64, i64* %2, align 4 + %7 = load double, double* %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____adj({ { %Callable* }*, { i64, %Callable* }* }* %5, i64 %6, double %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____ctl(%Array* %3, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____ctladj(%Array* %3, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__14__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = 
getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 %count-change) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 %count-change) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__14__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 %count-change) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { i64, 
%Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 %count-change) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__26__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array* }* + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__26__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array* }* + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__26__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Array* }* %9, { double, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__26__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 
ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Array* }* %9, { double, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @MemoryManagement__15__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__15__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___86c36a8a845246bfb23f44646c7e9d24_Subarray__body(%Array* %indices, %Array* %array) { +entry: + %sliced = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %nSliced = call i64 @__quantum__rt__array_get_size_1d(%Array* 
%indices) + %0 = icmp eq i64 %nSliced, 0 + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %1 + +continue__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %3 = bitcast i8* %2 to i64* + %4 = load i64, i64* %3, align 4 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %4) + %6 = bitcast i8* %5 to %Qubit** + %7 = load %Qubit*, %Qubit** %6, align 8 + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nSliced) + %9 = sub i64 %nSliced, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %10 = phi i64 [ 0, %continue__1 ], [ %14, %exiting__1 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 %10) + %13 = bitcast i8* %12 to %Qubit** + store %Qubit* %7, %Qubit** %13, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %14 = add i64 %10, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %8, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %15 = sub i64 %nSliced, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idx = phi i64 [ 1, %exit__1 ], [ %27, %exiting__2 ] + %16 = icmp sle i64 %idx, %15 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = load %Array*, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %18 = call %Array* @__quantum__rt__array_copy(%Array* %17, i1 false) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 %idx) + %20 = bitcast i8* %19 to i64* + %21 = load i64, i64* %20, align 4 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %21) + %23 = bitcast i8* %22 to %Qubit** + %24 = load %Qubit*, %Qubit** %23, align 8 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %idx) + %26 = bitcast i8* %25 to %Qubit** + store %Qubit* %24, %Qubit** %26, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + store %Array* %18, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %idx, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %28 = load %Array*, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 -1) + ret %Array* %28 +} + +define internal %Array* @Microsoft__Quantum__Arrays___1d2b34a15cf5490eb8142fe0e14c514a_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = 
%header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %9 = icmp slt i64 %8, %0 + br i1 %9, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %8, %condTrue__1 ], [ %0, %condFalse__1 ] + %10 = icmp eq i64 %nElements, 0 + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + %12 = sub i64 %0, 1 + br label %header__2 + +continue__1: ; preds = %condContinue__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %14 = bitcast i8* %13 to double* + %15 = load double, double* %14, align 8 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %17 = bitcast i8* %16 to %Array** + %18 = load %Array*, %Array** %17, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 1) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { double, %Array* }* + %21 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %20, i32 0, i32 1 + store double %15, double* %21, align 8 + store %Array* %18, %Array** %22, align 8 + %23 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %24 = sub i64 %nElements, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %25 = phi i64 [ 0, %then0__1 ], [ %30, %exiting__2 ] + %26 = icmp sle i64 %25, %12 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %25) + %28 = bitcast i8* %27 to %Array** + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %11 + +header__3: ; preds = %exiting__3, %continue__1 + %31 = phi i64 [ 0, %continue__1 ], [ %36, %exiting__3 ] + %32 = icmp sle i64 %31, %24 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %31) + %34 = bitcast i8* %33 to { double, %Array* }** + store { double, %Array* }* %20, { double, %Array* }** %34, align 8 + %35 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %35, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %36 = 
add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %23, %Array** %output, align 8 + %37 = sub i64 %nElements, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %38 = phi i64 [ 0, %exit__3 ], [ %46, %exiting__4 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %38) + %41 = bitcast i8* %40 to { double, %Array* }** + %42 = load { double, %Array* }*, { double, %Array* }** %41, align 8 + %43 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %42, i32 0, i32 1 + %44 = load %Array*, %Array** %43, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 1) + %45 = bitcast { double, %Array* }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %46 = add i64 %38, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %47 = sub i64 %nElements, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idxElement = phi i64 [ 1, %exit__4 ], [ %67, %exiting__5 ] + %48 = icmp sle i64 %idxElement, %47 + br i1 %48, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + %50 = call %Array* @__quantum__rt__array_copy(%Array* %49, i1 false) + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %52 = bitcast i8* %51 to double* + %53 = load double, double* %52, align 8 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %55 = bitcast i8* %54 to %Array** + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %56, i32 1) + %57 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %58 = bitcast %Tuple* %57 to { double, %Array* }* + %59 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %58, i32 0, i32 0 + %60 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %58, i32 0, i32 1 + store double %53, double* %59, align 8 + store %Array* %56, %Array** %60, align 8 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 %idxElement) + %62 = bitcast i8* %61 to { double, %Array* }** + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 1) + %63 = load { double, %Array* }*, { double, %Array* }** %62, align 8 + %64 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %63, i32 0, i32 1 + %65 = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 -1) + %66 = bitcast { double, %Array* }* %63 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %66, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %66, i32 -1) + store { double, %Array* }* %58, { double, %Array* }** %62, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 1) + store %Array* %50, %Array** %output, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %67 = add i64 %idxElement, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %68 = load %Array*, %Array** %output, align 8 + %69 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + %70 = sub i64 %0, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %71 = phi i64 [ 0, %exit__5 ], [ %76, %exiting__6 ] + %72 = icmp sle i64 %71, %70 + br i1 %72, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %73 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %71) + %74 = bitcast i8* %73 to %Array** + %75 = load %Array*, %Array** %74, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %75, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %76 = add i64 %71, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %77 = call i64 @__quantum__rt__array_get_size_1d(%Array* %68) + %78 = sub i64 %77, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %79 = phi i64 [ 0, %exit__6 ], [ %87, %exiting__7 ] + %80 = icmp sle i64 %79, %78 + br i1 %80, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %68, i64 %79) + %82 = bitcast i8* %81 to { double, %Array* }** + %83 = load { double, %Array* }*, { double, %Array* }** %82, align 8 + %84 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %83, i32 0, i32 1 + %85 = load %Array*, %Array** %84, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %85, i32 -1) + %86 = bitcast { double, %Array* }* %83 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %87 = add i64 %79, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %68, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + ret %Array* %68 +} + +define internal %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %9 = icmp slt i64 %0, %8 + br i1 %9, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 
+ br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %8, %condFalse__1 ] + %10 = icmp eq i64 %nElements, 0 + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %12 = sub i64 %0, 1 + br label %header__2 + +continue__1: ; preds = %condContinue__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %14 = bitcast i8* %13 to %Array** + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %17 = bitcast i8* %16 to double* + %18 = load double, double* %17, align 8 + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double }* getelementptr ({ %Array*, double }, { %Array*, double }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { %Array*, double }* + %21 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %20, i32 0, i32 1 + store %Array* %15, %Array** %21, align 8 + store double %18, double* %22, align 8 + %23 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %24 = sub i64 %nElements, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %25 = phi i64 [ 0, %then0__1 ], [ %30, %exiting__2 ] + %26 = icmp sle i64 %25, %12 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %25) + %28 = bitcast i8* %27 to %Array** + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %11 + +header__3: ; preds = %exiting__3, %continue__1 + %31 = phi i64 [ 0, %continue__1 ], [ %36, %exiting__3 ] + %32 = icmp sle i64 %31, %24 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %31) + %34 = bitcast i8* %33 to { %Array*, double }** + store { %Array*, double }* %20, { %Array*, double }** %34, align 8 + %35 = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %35, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %36 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %23, %Array** %output, align 8 + %37 = sub i64 %nElements, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %38 = phi i64 [ 0, %exit__3 ], [ %46, %exiting__4 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %38) + %41 = bitcast i8* %40 to { %Array*, double }** + %42 = load { %Array*, double }*, { %Array*, double }** %41, align 8 + %43 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %42, i32 0, i32 0 + %44 = load %Array*, 
%Array** %43, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 1) + %45 = bitcast { %Array*, double }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %46 = add i64 %38, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %47 = sub i64 %nElements, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idxElement = phi i64 [ 1, %exit__4 ], [ %67, %exiting__5 ] + %48 = icmp sle i64 %idxElement, %47 + br i1 %48, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + %50 = call %Array* @__quantum__rt__array_copy(%Array* %49, i1 false) + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %52 = bitcast i8* %51 to %Array** + %53 = load %Array*, %Array** %52, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 1) + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %55 = bitcast i8* %54 to double* + %56 = load double, double* %55, align 8 + %57 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double }* getelementptr ({ %Array*, double }, { %Array*, double }* null, i32 1) to i64)) + %58 = bitcast %Tuple* %57 to { %Array*, double }* + %59 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %58, i32 0, i32 0 + %60 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %58, i32 0, i32 1 + store %Array* %53, %Array** %59, align 8 + store double %56, double* %60, align 8 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 %idxElement) + %62 = bitcast i8* %61 to { %Array*, double }** + call void @__quantum__rt__array_update_alias_count(%Array* %53, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 1) + %63 = load { %Array*, double }*, { %Array*, double }** %62, align 8 + %64 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %63, i32 0, i32 0 + %65 = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 -1) + %66 = bitcast { %Array*, double }* %63 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %66, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %66, i32 -1) + store { %Array*, double }* %58, { %Array*, double }** %62, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 1) + store %Array* %50, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %67 = add i64 %idxElement, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %68 = load %Array*, %Array** %output, align 8 + %69 = load %Array*, %Array** %21, align 8 + %70 = sub i64 %0, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %71 = phi i64 [ 0, %exit__5 ], [ %76, %exiting__6 ] + %72 = icmp sle i64 %71, %70 + br i1 %72, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %73 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %71) + %74 = bitcast i8* %73 to %Array** + %75 = load %Array*, %Array** %74, align 8 + 
call void @__quantum__rt__array_update_alias_count(%Array* %75, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %76 = add i64 %71, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %77 = call i64 @__quantum__rt__array_get_size_1d(%Array* %68) + %78 = sub i64 %77, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %79 = phi i64 [ 0, %exit__6 ], [ %87, %exiting__7 ] + %80 = icmp sle i64 %79, %78 + br i1 %80, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %68, i64 %79) + %82 = bitcast i8* %81 to { %Array*, double }** + %83 = load { %Array*, double }*, { %Array*, double }** %82, align 8 + %84 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %83, i32 0, i32 0 + %85 = load %Array*, %Array** %84, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %85, i32 -1) + %86 = bitcast { %Array*, double }* %83 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %87 = add i64 %79, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %68, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + ret %Array* %68 +} + +define internal %Array* @Microsoft__Quantum__Arrays___00d59157a6454ecdaf64b45c69ab4afd_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %2 = icmp slt i64 %0, %1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = icmp eq i64 %nElements, 0 + br i1 %3, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %4 + +continue__1: ; preds = %condContinue__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %6 = bitcast i8* %5 to i64* + %7 = load i64, i64* %6, align 4 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %9 = bitcast i8* %8 to i2* + %10 = load i2, i2* %9, align 1 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i2 }* getelementptr ({ i64, i2 }, { i64, i2 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i64, i2 }* + %13 = getelementptr inbounds { i64, i2 }, { i64, i2 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i64, i2 }, { i64, i2 }* %12, i32 0, i32 1 + store i64 %7, i64* %13, align 4 + store i2 %10, i2* %14, align 1 + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %16 = 
sub i64 %nElements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %17 = phi i64 [ 0, %continue__1 ], [ %21, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %17) + %20 = bitcast i8* %19 to { i64, i2 }** + store { i64, i2 }* %12, { i64, i2 }** %20, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %15, %Array** %output, align 8 + %22 = sub i64 %nElements, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %23) + %26 = bitcast i8* %25 to { i64, i2 }** + %27 = load { i64, i2 }*, { i64, i2 }** %26, align 8 + %28 = bitcast { i64, i2 }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %30 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %48, %exiting__3 ] + %31 = icmp sle i64 %idxElement, %30 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %32 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %35 = bitcast i8* %34 to i64* + %36 = load i64, i64* %35, align 4 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %38 = bitcast i8* %37 to i2* + %39 = load i2, i2* %38, align 1 + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i2 }* getelementptr ({ i64, i2 }, { i64, i2 }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i64, i2 }* + %42 = getelementptr inbounds { i64, i2 }, { i64, i2 }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i64, i2 }, { i64, i2 }* %41, i32 0, i32 1 + store i64 %36, i64* %42, align 4 + store i2 %39, i2* %43, align 1 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxElement) + %45 = bitcast i8* %44 to { i64, i2 }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %46 = load { i64, i2 }*, { i64, i2 }** %45, align 8 + %47 = bitcast { i64, i2 }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { i64, i2 }* %41, { i64, i2 }** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %49 = load %Array*, %Array** %output, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { i64, i2 }** + %56 = load { i64, i2 }*, { i64, i2 }** %55, align 8 + %57 = bitcast { i64, i2 }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret %Array* %49 +} + +define internal %Array* @Microsoft__Quantum__Arrays___5ac6d1808c4040b9aa3fa0e6ce75855c_Padded__body(i64 %nElementsTotal, { double, double }* %defaultElement, %Array* %inputArray) { +entry: + %0 = bitcast { double, double }* %defaultElement to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %nElementsInitial = call i64 @__quantum__rt__array_get_size_1d(%Array* %inputArray) + %1 = sub i64 %nElementsInitial, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %inputArray, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 1) + %nAbsElementsTotal = call i64 @Microsoft__Quantum__Math__AbsI__body(i64 %nElementsTotal) + %9 = icmp sge i64 %nAbsElementsTotal, %nElementsInitial + %10 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([71 x i8], [71 x i8]* @10, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %9, i1 true, %String* %10) + %nElementsPad = sub i64 %nAbsElementsTotal, %nElementsInitial + %padArray = call %Array* @Microsoft__Quantum__Arrays___4ca44cbcd7d8480ab6bb0acabd529c9a_ConstantArray__body(i64 %nElementsPad, { double, double }* %defaultElement) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %padArray) + %12 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %padArray, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + br label %exiting__2 + +exiting__2: ; preds = 
%body__2 + %19 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 1) + %20 = icmp sge i64 %nElementsTotal, 0 + br i1 %20, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__2 + %21 = call %Array* @__quantum__rt__array_concatenate(%Array* %padArray, %Array* %inputArray) + %22 = call i64 @__quantum__rt__array_get_size_1d(%Array* %21) + %23 = sub i64 %22, 1 + br label %header__3 + +condFalse__1: ; preds = %exit__2 + %24 = call %Array* @__quantum__rt__array_concatenate(%Array* %inputArray, %Array* %padArray) + %25 = call i64 @__quantum__rt__array_get_size_1d(%Array* %24) + %26 = sub i64 %25, 1 + br label %header__4 + +condContinue__1: ; preds = %exit__4, %exit__3 + %27 = phi %Array* [ %21, %exit__3 ], [ %24, %exit__4 ] + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + %28 = sub i64 %nElementsInitial, 1 + br label %header__5 + +header__3: ; preds = %exiting__3, %condTrue__1 + %29 = phi i64 [ 0, %condTrue__1 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %23 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) + br label %condContinue__1 + +header__4: ; preds = %exiting__4, %condFalse__1 + %36 = phi i64 [ 0, %condFalse__1 ], [ %42, %exiting__4 ] + %37 = icmp sle i64 %36, %26 + br i1 %37, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %24, i64 %36) + %39 = bitcast i8* %38 to { double, double }** + %40 = load { double, double }*, { double, double }** %39, align 8 + %41 = bitcast { double, double }* %40 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %42 = add i64 %36, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 -1) + br label %condContinue__1 + +header__5: ; preds = %exiting__5, %condContinue__1 + %43 = phi i64 [ 0, %condContinue__1 ], [ %49, %exiting__5 ] + %44 = icmp sle i64 %43, %28 + br i1 %44, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %inputArray, i64 %43) + %46 = bitcast i8* %45 to { double, double }** + %47 = load { double, double }*, { double, double }** %46, align 8 + %48 = bitcast { double, double }* %47 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %48, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %49 = add i64 %43, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 -1) + %50 = sub i64 %11, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + 
%51 = phi i64 [ 0, %exit__5 ], [ %57, %exiting__6 ] + %52 = icmp sle i64 %51, %50 + br i1 %52, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %padArray, i64 %51) + %54 = bitcast i8* %53 to { double, double }** + %55 = load { double, double }*, { double, double }** %54, align 8 + %56 = bitcast { double, double }* %55 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %57 = add i64 %51, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + %58 = sub i64 %11, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %59 = phi i64 [ 0, %exit__6 ], [ %65, %exiting__7 ] + %60 = icmp sle i64 %59, %58 + br i1 %60, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %padArray, i64 %59) + %62 = bitcast i8* %61 to { double, double }** + %63 = load { double, double }*, { double, double }** %62, align 8 + %64 = bitcast { double, double }* %63 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %64, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %65 = add i64 %59, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %padArray, i32 -1) + ret %Array* %27 +} + +define internal i64 @Microsoft__Quantum__Math__AbsI__body(i64 %a) { +entry: + %0 = icmp slt i64 %a, 0 + br i1 %0, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %1 = sub i64 0, %a + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %2 = phi i64 [ %1, %condTrue__1 ], [ %a, %condFalse__1 ] + ret i64 %2 +} + +define internal void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %actual, i1 %expected, %String* %message) { +entry: + %0 = icmp ne i1 %actual, %expected + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Diagnostics___51fffc26616d4cc5a746323fd8bcea36___QsRef0__FormattedFailure____body(i1 %actual, i1 %expected, %String* %message) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___4ca44cbcd7d8480ab6bb0acabd529c9a_ConstantArray__body(i64 %length, { double, double }* %value) { +entry: + %0 = bitcast { double, double }* %value to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %2 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %3) + %6 = bitcast i8* %5 to { double, double }** + store { double, double }* %value, { double, double }** %6, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret %Array* %1 +} + +declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) + +define internal %Array* @Microsoft__Quantum__Arrays___8db1b1d8b63441b583b7338681e3b5b2_ConstantArray__body(i64 %length, double %value) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %1 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2) + %5 = bitcast i8* %4 to double* + store double %value, double* %5, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + ret %Array* %0 +} + +define internal %Array* @Microsoft__Quantum__Arrays___ac214dcd588b470fb29f1cc67e145065_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to %Array** + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %7 = icmp eq i64 %length, 0 + br i1 %7, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %9 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %11 = bitcast i8* %10 to %Array** + %12 = load %Array*, %Array** %11, align 8 + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Array* }* + %15 = getelementptr inbounds { %Array* }, { %Array* }* %14, i32 0, i32 0 + store %Array* %12, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %13, %Tuple* %16) + %17 = bitcast %Tuple* %16 to { %Callable* }* + %18 = getelementptr inbounds { %Callable* }, { %Callable* }* %17, i32 0, i32 0 + %first = load %Callable*, %Callable** %18, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %first, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %first, i32 1) + %19 = call %Array* 
@__quantum__rt__array_create_1d(i32 8, i64 %length) + %20 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %21 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__2 ] + %22 = icmp sle i64 %21, %9 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %21) + %24 = bitcast i8* %23 to %Array** + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %8 + +header__3: ; preds = %exiting__3, %continue__1 + %27 = phi i64 [ 0, %continue__1 ], [ %31, %exiting__3 ] + %28 = icmp sle i64 %27, %20 + br i1 %28, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 %27) + %30 = bitcast i8* %29 to %Callable** + store %Callable* %first, %Callable** %30, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %first, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %first, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %31 = add i64 %27, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %19, %Array** %retval, align 8 + %32 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %33 = phi i64 [ 0, %exit__3 ], [ %38, %exiting__4 ] + %34 = icmp sle i64 %33, %32 + br i1 %34, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 %33) + %36 = bitcast i8* %35 to %Callable** + %37 = load %Callable*, %Callable** %36, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %37, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %37, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %38 = add i64 %33, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 1) + %39 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idx = phi i64 [ 1, %exit__4 ], [ %56, %exiting__5 ] + %40 = icmp sle i64 %idx, %39 + br i1 %40, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %41 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 -1) + %42 = call %Array* @__quantum__rt__array_copy(%Array* %41, i1 false) + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %44 = bitcast i8* %43 to %Array** + %45 = load %Array*, %Array** %44, align 8 + %46 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %47 = bitcast %Tuple* %46 to { %Array* }* + %48 = getelementptr inbounds { %Array* }, { %Array* }* %47, i32 0, i32 0 + store %Array* %45, %Array** %48, align 8 + %49 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %46, %Tuple* %49) + %50 = bitcast %Tuple* %49 to { %Callable* }* + %51 = getelementptr inbounds { %Callable* }, 
{ %Callable* }* %50, i32 0, i32 0 + %52 = load %Callable*, %Callable** %51, align 8 + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %42, i64 %idx) + %54 = bitcast i8* %53 to %Callable** + call void @__quantum__rt__capture_update_alias_count(%Callable* %52, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %52, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %52, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %52, i32 1) + %55 = load %Callable*, %Callable** %54, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %55, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %55, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %55, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %55, i32 -1) + store %Callable* %52, %Callable** %54, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 1) + store %Array* %42, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %46, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %52, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %52, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %49, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %56 = add i64 %idx, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %57 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %58 = sub i64 %length, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %59 = phi i64 [ 0, %exit__5 ], [ %64, %exiting__6 ] + %60 = icmp sle i64 %59, %58 + br i1 %60, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %59) + %62 = bitcast i8* %61 to %Array** + %63 = load %Array*, %Array** %62, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %63, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %64 = add i64 %59, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %first, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %first, i32 -1) + %65 = call i64 @__quantum__rt__array_get_size_1d(%Array* %57) + %66 = sub i64 %65, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %67 = phi i64 [ 0, %exit__6 ], [ %72, %exiting__7 ] + %68 = icmp sle i64 %67, %66 + br i1 %68, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %57, i64 %67) + %70 = bitcast i8* %69 to %Callable** + %71 = load %Callable*, %Callable** %70, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %71, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %71, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %72 = add i64 %67, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %57, i32 -1) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %first, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %first, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret %Array* %57 +} + +define internal %Array* @Microsoft__Quantum__Arrays___bce10a946d1b466781aeb2785d88e6e2_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = icmp eq i64 %length, 0 + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %1 + +continue__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %3 = bitcast i8* %2 to double* + %4 = load double, double* %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { double }* + %7 = getelementptr inbounds { double }, { double }* %6, i32 0, i32 0 + store double %4, double* %7, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }* }* getelementptr ({ { double, double }* }, { { double, double }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %5, %Tuple* %8) + %9 = bitcast %Tuple* %8 to { { double, double }* }* + %10 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %9, i32 0, i32 0 + %first = load { double, double }*, { double, double }** %10, align 8 + %11 = bitcast { double, double }* %first to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %13 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %14 = phi i64 [ 0, %continue__1 ], [ %18, %exiting__1 ] + %15 = icmp sle i64 %14, %13 + br i1 %15, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %14) + %17 = bitcast i8* %16 to { double, double }** + store { double, double }* %first, { double, double }** %17, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %14, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %12, %Array** %retval, align 8 + %19 = sub i64 %length, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %26, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %20) + %23 = bitcast i8* %22 to { double, double }** + %24 = load { 
double, double }*, { double, double }** %23, align 8 + %25 = bitcast { double, double }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %27 = sub i64 %length, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idx = phi i64 [ 1, %exit__2 ], [ %46, %exiting__3 ] + %28 = icmp sle i64 %idx, %27 + br i1 %28, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %29 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 -1) + %30 = call %Array* @__quantum__rt__array_copy(%Array* %29, i1 false) + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %32 = bitcast i8* %31 to double* + %33 = load double, double* %32, align 8 + %34 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %35 = bitcast %Tuple* %34 to { double }* + %36 = getelementptr inbounds { double }, { double }* %35, i32 0, i32 0 + store double %33, double* %36, align 8 + %37 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }* }* getelementptr ({ { double, double }* }, { { double, double }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %34, %Tuple* %37) + %38 = bitcast %Tuple* %37 to { { double, double }* }* + %39 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %38, i32 0, i32 0 + %40 = load { double, double }*, { double, double }** %39, align 8 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 %idx) + %42 = bitcast i8* %41 to { double, double }** + %43 = bitcast { double, double }* %40 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 1) + %44 = load { double, double }*, { double, double }** %42, align 8 + %45 = bitcast { double, double }* %44 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + store { double, double }* %40, { double, double }** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %30, i32 1) + store %Array* %30, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %46 = add i64 %idx, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %47 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + %48 = call i64 @__quantum__rt__array_get_size_1d(%Array* %47) + %49 = sub i64 %48, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %50 = phi i64 [ 
0, %exit__3 ], [ %56, %exiting__4 ] + %51 = icmp sle i64 %50, %49 + br i1 %51, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %50) + %53 = bitcast i8* %52 to { double, double }** + %54 = load { double, double }*, { double, double }** %53, align 8 + %55 = bitcast { double, double }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %55, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %56 = add i64 %50, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret %Array* %47 +} + +define internal %Range @Microsoft__Quantum__Arrays___d58849b717694e4ca69317572366b289_IndexRange__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %2 +} + +define internal %Range @Microsoft__Quantum__Arrays___ab9454a18cf34e7dab26076c15ee491d_IndexRange__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { i64, %Callable* }** + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %5, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { i64, %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %11 = sub i64 %0, 1 + %12 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %11, 2 + %13 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %14 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %15 = icmp sle i64 %14, %13 + br i1 %15, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %14) + %17 = bitcast i8* %16 to { i64, %Callable* }** + %18 = load { i64, %Callable* }*, { i64, %Callable* }** %17, align 8 + %19 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %18, i32 0, i32 1 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %20, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %20, i32 -1) + %21 = bitcast { i64, %Callable* }* %18 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %14, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %12 +} + +define internal i1 @Microsoft__Quantum__Arrays___d03f28613a2a406a92da3539b001d776_IsEmpty__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { { double, double }*, %Array* }** + %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0 + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %14 = icmp eq i64 %0, 0 + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %16) + %19 = bitcast i8* %18 to { { double, double }*, %Array* }** + %20 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %19, align 8 + %21 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %20, i32 0, i32 0 + %22 = load { double, double }*, { double, double }** %21, align 8 + %23 = bitcast { double, double }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %23, i32 -1) + %24 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %20, i32 0, i32 1 + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + %26 = bitcast { { double, double }*, %Array* }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret i1 %14 +} + +define internal %Callable* @Microsoft__Quantum__Arrays___4cd8b89fe06d48f482ef5ccfcb618894_ElementAt__body(i64 %index, %Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = 
%exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %8 = icmp sge i64 %index, 0 + br i1 %8, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %exit__1 + %9 = icmp slt i64 %index, %0 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %exit__1 + %10 = phi i1 [ %9, %condTrue__1 ], [ %8, %exit__1 ] + %11 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @12, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %10, %String* %11) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %index) + %13 = bitcast i8* %12 to %Callable** + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 1) + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %condContinue__1 + %16 = phi i64 [ 0, %condContinue__1 ], [ %21, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %16) + %19 = bitcast i8* %18 to %Callable** + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %20, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %20, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) + ret %Callable* %14 +} + +define internal %Callable* @Microsoft__Quantum__Arrays___fc3dc354bc024fd5b7f38df86565fb27_LookupFunction__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* 
@Microsoft__Quantum__Arrays___4cd8b89fe06d48f482ef5ccfcb618894_ElementAt__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %9 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %15, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %10) + %13 = bitcast i8* %12 to %Callable** + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %15 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %array, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Callable*, %Array* }* + %18 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 1 + store %Callable* %8, %Callable** %18, align 8 + store %Array* %array, %Array** %19, align 8 + %20 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__27__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__16__FunctionTable, %Tuple* %16) + %21 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %22 = phi i64 [ 0, %exit__2 ], [ %27, %exiting__3 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %22) + %25 = bitcast i8* %24 to %Callable** + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %27 = add i64 %22, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Callable* %20 +} + +define internal void @Lifted__PartialApplication__27__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64 }* + %1 = getelementptr inbounds { i64 }, { i64 }* %0, i32 0, i32 0 + %2 = load i64, i64* %1, align 4 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %4 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %3, i32 0, i32 1 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { i64, %Array* }* + %8 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %7, i32 0, i32 1 + store i64 %2, i64* %8, align 4 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %3, i32 0, i32 0 + %11 = load 
%Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Arrays___4cd8b89fe06d48f482ef5ccfcb618894_ElementAt__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Array* }* + %1 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %0, i32 0, i32 1 + %3 = load i64, i64* %1, align 4 + %4 = load %Array*, %Array** %2, align 8 + %5 = call %Callable* @Microsoft__Quantum__Arrays___4cd8b89fe06d48f482ef5ccfcb618894_ElementAt__body(i64 %3, %Array* %4) + %6 = bitcast %Tuple* %result-tuple to { %Callable* }* + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + store %Callable* %5, %Callable** %7, align 8 + ret void +} + +define internal void @MemoryManagement__16__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Callable** + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__16__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label 
%body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Callable** + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %11, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___8023f18e08eb4c09a8a8acf673dba09b_ConstantArray__body(i64 %length, i2 %value) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %length) + %1 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2) + %5 = bitcast i8* %4 to i2* + store i2 %value, i2* %5, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + ret %Array* %0 +} + +define internal { i64, double, i1 }* @Microsoft__Quantum__Math____QsRef1__ExtendedTruncation____body(double %value) { +entry: + %truncated = fptosi double %value to i64 + %0 = sitofp i64 %truncated to double + %1 = fsub double %0, %value + %2 = fcmp oge double %value, 0.000000e+00 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, i1 }* getelementptr ({ i64, double, i1 }, { i64, double, i1 }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i64, double, i1 }* + %5 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %4, i32 0, i32 2 + store i64 %truncated, i64* %5, align 4 + store double %1, double* %6, align 8 + store i1 %2, i1* %7, align 1 + ret { i64, double, i1 }* %4 +} + +define internal double @Microsoft__Quantum__Math__AbsComplex__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %d = call double @Microsoft__Quantum__Math__AbsSquaredComplex__body({ double, double }* %input) + %1 = call double @__quantum__qis__sqrt__body(double %d) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %1 +} + +define internal double @Microsoft__Quantum__Math__AbsSquaredComplex__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 0 + %real = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 1 + %imaginary = load double, double* %2, align 8 + %3 = fmul double %real, %real + %4 = fmul double %imaginary, %imaginary + %5 = fadd double %3, %4 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, 
i32 -1) + ret double %5 +} + +declare double @__quantum__qis__sqrt__body(double) + +define internal double @Microsoft__Quantum__Math__ArcTan2__body(double %y, double %x) { +entry: + %0 = call double @__quantum__qis__arctan2__body(double %y, double %x) + ret double %0 +} + +declare double @__quantum__qis__arctan2__body(double, double) + +define internal double @Microsoft__Quantum__Math__ArgComplex__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 0 + %real = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 1 + %imaginary = load double, double* %2, align 8 + %3 = call double @__quantum__qis__arctan2__body(double %imaginary, double %real) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %3 +} + +define internal double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 1 + %2 = load double, double* %1, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %2 +} + +define internal { double, double }* @Microsoft__Quantum__Math__Complex__body(double %Real, double %Imag) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { double, double }* + %2 = getelementptr inbounds { double, double }, { double, double }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { double, double }, { double, double }* %1, i32 0, i32 1 + store double %Real, double* %2, align 8 + store double %Imag, double* %3, align 8 + ret { double, double }* %1 +} + +define internal { double, double }* @Microsoft__Quantum__Math__ComplexAsComplexPolar__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = call double @Microsoft__Quantum__Math__AbsComplex__body({ double, double }* %input) + %2 = call double @Microsoft__Quantum__Math__ArgComplex__body({ double, double }* %input) + %3 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %1, double %2) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret { double, double }* %3 +} + +define internal { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %Magnitude, double %Argument) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { double, double }* + %2 = getelementptr inbounds { double, double }, { double, double }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { double, double }, { double, double }* %1, i32 0, i32 1 + store double %Magnitude, double* %2, align 8 + store double %Argument, double* %3, align 8 + ret { double, double }* %1 +} + +define internal double @Microsoft__Quantum__Math__Lg__body(double %input) { +entry: + %0 = call double @__quantum__qis__log__body(double %input) + %1 = call double 
@Microsoft__Quantum__Math__LogOf2__body() + %2 = fdiv double %0, %1 + ret double %2 +} + +declare double @__quantum__qis__log__body(double) + +define internal double @Microsoft__Quantum__Math__LogOf2__body() { +entry: + ret double 0x3FE62E42FEFA39EF +} + +define internal double @Microsoft__Quantum__Math__Log__body(double %input) { +entry: + %0 = call double @__quantum__qis__log__body(double %input) + ret double %0 +} + +define internal i64 @Microsoft__Quantum__Math__Max__body(%Array* %values) { +entry: + %max = alloca i64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 0) + %1 = bitcast i8* %0 to i64* + %2 = load i64, i64* %1, align 4 + store i64 %2, i64* %max, align 4 + %nTerms = call i64 @__quantum__rt__array_get_size_1d(%Array* %values) + %3 = sub i64 %nTerms, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %4 = icmp sle i64 %idx, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %idx) + %6 = bitcast i8* %5 to i64* + %7 = load i64, i64* %6, align 4 + %8 = load i64, i64* %max, align 4 + %9 = icmp sgt i64 %7, %8 + br i1 %9, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %idx) + %11 = bitcast i8* %10 to i64* + %12 = load i64, i64* %11, align 4 + store i64 %12, i64* %max, align 4 + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %13 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %14 = load i64, i64* %max, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 -1) + ret i64 %14 +} + +define internal i64 @Microsoft__Quantum__Math__Min__body(%Array* %values) { +entry: + %min = alloca i64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 0) + %1 = bitcast i8* %0 to i64* + %2 = load i64, i64* %1, align 4 + store i64 %2, i64* %min, align 4 + %nTerms = call i64 @__quantum__rt__array_get_size_1d(%Array* %values) + %3 = sub i64 %nTerms, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %4 = icmp sle i64 %idx, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %idx) + %6 = bitcast i8* %5 to i64* + %7 = load i64, i64* %6, align 4 + %8 = load i64, i64* %min, align 4 + %9 = icmp slt i64 %7, %8 + br i1 %9, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %idx) + %11 = bitcast i8* %10 to i64* + %12 = load i64, i64* %11, align 4 + store i64 %12, i64* %min, align 4 + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %13 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %14 = load i64, i64* %min, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 -1) + ret i64 %14 +} + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare double 
@llvm.pow.f64(double, double) #0 + +define internal double @Microsoft__Quantum__Math__Sqrt__body(double %d) { +entry: + %0 = call double @__quantum__qis__sqrt__body(double %d) + ret double %0 +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertAllZero__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %2) + %5 = bitcast i8* %4 to %Qubit** + %qubit = load %Qubit*, %Qubit** %5, align 8 + %6 = call %Result* @__quantum__rt__result_get_zero() + call void @Microsoft__Quantum__Diagnostics__AssertQubit__body(%Result* %6, %Qubit* %qubit) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertQubit__body(%Result* %expected, %Qubit* %q) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to i2* + store i2 -2, i2* %2, align 1 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %q, %Qubit** %5, align 8 + %6 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @13, i32 0, i32 0)) + %7 = call %String* @__quantum__rt__result_to_string(%Result* %expected) + %8 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %7) + call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %0, %Array* %3, %Result* %expected, %String* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertAllZero__adj(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertAllZero__body(%Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertAllZero__ctl(%Array* %ctrls, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertAllZero__body(%Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertAllZero__ctladj(%Array* %__controlQubits__, 
%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertAllZero__ctl(%Array* %__controlQubits__, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %bases, %Array* %qubits, %Result* %result, %String* %msg) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double 1.000000e+00, %String* %msg, double 1.000000e-10) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +declare void @__quantum__qis__assertmeasurementprobability__body(%Array*, %Array*, %Result*, double, %String*, double) + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__adj(%Array* %bases, %Array* %qubits, %Result* %result, %String* %msg) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %bases, %Array* %qubits, %Result* %result, %String* %msg) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctl(%Array* %controllingQubits, { %Array*, %Array*, %Result*, %String* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controllingQubits, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 3 + %msg = load %String*, %String** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controllingQubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void 
+} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctladj(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 3 + %msg = load %String*, %String** %4, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, %String* }* getelementptr ({ %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Array*, %Array*, %Result*, %String* }* + %7 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 2 + %10 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 3 + store %Array* %bases, %Array** %7, align 8 + store %Array* %qubits, %Array** %8, align 8 + store %Result* %result, %Result** %9, align 8 + store %String* %msg, %String** %10, align 8 + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %6) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__body(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__adj(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 3 + %prob = load double, double* %4, align 8 + %5 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 4 + %msg = load %String*, %String** %5, align 8 + %6 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 5 + %tolerance = load double, double* %6, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, double, %String*, double }* getelementptr ({ %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array*, %Result*, double, %String*, double }* + %9 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { 
%Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 2 + %12 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 3 + %13 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 4 + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 5 + store %Array* %bases, %Array** %9, align 8 + store %Array* %qubits, %Array** %10, align 8 + store %Result* %result, %Result** %11, align 8 + store double %prob, double* %12, align 8 + store %String* %msg, %String** %13, align 8 + store double %tolerance, double* %14, align 8 + call void @__quantum__qis__assertmeasurementprobability__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %8) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +declare void @__quantum__qis__assertmeasurementprobability__ctl(%Array*, { %Array*, %Array*, %Result*, double, %String*, double }*) + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__ctladj(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 3 + %prob = load double, double* %4, align 8 + %5 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, 
i32 4 + %msg = load %String*, %String** %5, align 8 + %6 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 5 + %tolerance = load double, double* %6, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, double, %String*, double }* getelementptr ({ %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array*, %Result*, double, %String*, double }* + %9 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 2 + %12 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 3 + %13 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 4 + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 5 + store %Array* %bases, %Array** %9, align 8 + store %Array* %qubits, %Array** %10, align 8 + store %Result* %result, %Result** %11, align 8 + store double %prob, double* %12, align 8 + store %String* %msg, %String** %13, align 8 + store double %tolerance, double* %14, align 8 + call void @__quantum__qis__assertmeasurementprobability__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %8) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +declare %String* @__quantum__rt__result_to_string(%Result*) + +define internal void @Microsoft__Quantum__Diagnostics__AssertQubit__adj(%Result* %expected, %Qubit* %q) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to i2* + store i2 -2, i2* %2, align 1 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 
= bitcast i8* %4 to %Qubit** + store %Qubit* %q, %Qubit** %5, align 8 + %6 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @13, i32 0, i32 0)) + %7 = call %String* @__quantum__rt__result_to_string(%Result* %expected) + %8 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %7) + call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__adj(%Array* %0, %Array* %3, %Result* %expected, %String* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertQubit__ctl(%Array* %__controlQubits__, { %Result*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Result*, %Qubit* }, { %Result*, %Qubit* }* %0, i32 0, i32 0 + %expected = load %Result*, %Result** %1, align 8 + %2 = getelementptr inbounds { %Result*, %Qubit* }, { %Result*, %Qubit* }* %0, i32 0, i32 1 + %q = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to i2* + store i2 -2, i2* %5, align 1 + %6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 0) + %8 = bitcast i8* %7 to %Qubit** + store %Qubit* %q, %Qubit** %8, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %expected, i32 1) + %9 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @13, i32 0, i32 0)) + %10 = call %String* @__quantum__rt__result_to_string(%Result* %expected) + %11 = call %String* @__quantum__rt__string_concatenate(%String* %9, %String* %10) + call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, %String* }* getelementptr ({ %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Array*, %Result*, %String* }* + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 3 + store %Array* %3, %Array** %14, align 8 + store %Array* %6, %Array** %15, align 8 + store %Result* %expected, %Result** %16, align 8 + store %String* %11, %String** %17, align 8 + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %13) + call void @__quantum__rt__array_update_alias_count(%Array* 
%__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %expected, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertQubit__ctladj(%Array* %__controlQubits__, { %Result*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Result*, %Qubit* }, { %Result*, %Qubit* }* %0, i32 0, i32 0 + %expected = load %Result*, %Result** %1, align 8 + %2 = getelementptr inbounds { %Result*, %Qubit* }, { %Result*, %Qubit* }* %0, i32 0, i32 1 + %q = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to i2* + store i2 -2, i2* %5, align 1 + %6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 0) + %8 = bitcast i8* %7 to %Qubit** + store %Qubit* %q, %Qubit** %8, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %expected, i32 1) + %9 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @13, i32 0, i32 0)) + %10 = call %String* @__quantum__rt__result_to_string(%Result* %expected) + %11 = call %String* @__quantum__rt__string_concatenate(%String* %9, %String* %10) + call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, %String* }* getelementptr ({ %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Array*, %Result*, %String* }* + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 3 + store %Array* %3, %Array** %14, align 8 + store %Array* %6, %Array** %15, align 8 + store %Result* %expected, %Result** %16, align 8 + store %String* %11, %String** %17, align 8 + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctladj(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %13) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %expected, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + 
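+; The suffixes __body, __adj, __ctl and __ctladj above are the QIR specializations
+; generated for the Q# functors: base, Adjoint, Controlled and Controlled Adjoint.
+; Alias counts (__quantum__rt__*_update_alias_count) track live Q# bindings and
+; drive copy-on-write decisions, while reference counts manage heap lifetime; every
+; +1 update is paired with a matching -1 on exit. Tuple allocations compute their
+; byte size with the usual LLVM idiom ptrtoint (T* getelementptr (T, T* null, i32 1)
+; to i64), and the constant i2 -2 is the two-bit Pauli encoding of PauliZ
+; (PauliI = 0, PauliX = 1, PauliY = -1).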
+define internal void @Microsoft__Quantum__Diagnostics___51fffc26616d4cc5a746323fd8bcea36___QsRef0__FormattedFailure____body(i1 %actual, i1 %expected, %String* %message) { +entry: + %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @14, i32 0, i32 0)) + %1 = call %String* @__quantum__rt__string_concatenate(%String* %0, %String* %message) + %2 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @15, i32 0, i32 0)) + %4 = call %String* @__quantum__rt__string_concatenate(%String* %2, %String* %3) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + br i1 %expected, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %5 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @16, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %entry + %6 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @17, i32 0, i32 0)) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %7 = phi %String* [ %5, %condTrue__1 ], [ %6, %condFalse__1 ] + %8 = call %String* @__quantum__rt__string_concatenate(%String* %4, %String* %7) + call void @__quantum__rt__string_update_reference_count(%String* %4, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + %9 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @18, i32 0, i32 0)) + %10 = call %String* @__quantum__rt__string_concatenate(%String* %8, %String* %9) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1) + br i1 %actual, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condContinue__1 + %11 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @16, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condContinue__1 + %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @17, i32 0, i32 0)) + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condTrue__2 + %13 = phi %String* [ %11, %condTrue__2 ], [ %12, %condFalse__2 ] + %14 = call %String* @__quantum__rt__string_concatenate(%String* %10, %String* %13) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + call void @__quantum__rt__fail(%String* %14) + unreachable +} + +define internal %Array* @Microsoft__Quantum__Convert__BoolArrayAsPauli__body(i2 %pauli, i1 %bitApply, %Array* %bits) { +entry: + %paulis = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %nBits = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %nBits) + %1 = sub i64 %nBits, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 
+ br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2) + %5 = bitcast i8* %4 to i2* + store i2 0, i2* %5, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %0, %Array** %paulis, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 1) + %7 = sub i64 %nBits, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idxBit = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %8 = icmp sle i64 %idxBit, %7 + br i1 %8, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %9 = load %Array*, %Array** %paulis, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 -1) + %10 = call %Array* @__quantum__rt__array_copy(%Array* %9, i1 false) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bits, i64 %idxBit) + %12 = bitcast i8* %11 to i1* + %13 = load i1, i1* %12, align 1 + %14 = icmp eq i1 %13, %bitApply + %15 = select i1 %14, i2 %pauli, i2 0 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %idxBit) + %17 = bitcast i8* %16 to i2* + store i2 %15, i2* %17, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + store %Array* %10, %Array** %paulis, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %18 = add i64 %idxBit, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %19 = load %Array*, %Array** %paulis, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 -1) + ret %Array* %19 +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____body(double %angle, i64 %idxTarget, %Array* %register) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 0, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %3 = bitcast i8* %2 to %Qubit** + %4 = load %Qubit*, %Qubit** %3, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %6 = bitcast i8* %5 to %Qubit** + store %Qubit* %4, %Qubit** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %angle, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____adj(double %angle, i64 %idxTarget, %Array* %register) { +entry: + call void 
@__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 0, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %3 = bitcast i8* %2 to %Qubit** + %4 = load %Qubit*, %Qubit** %3, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %6 = bitcast i8* %5 to %Qubit** + store %Qubit* %4, %Qubit** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %angle, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____ctl(%Array* %__controlQubits__, { double, i64, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 0 + %angle = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2 + %register = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %5 = bitcast i8* %4 to i2* + store i2 0, i2* %5, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %10 = bitcast i8* %9 to %Qubit** + store %Qubit* %8, %Qubit** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Array*, double, %Array* }* + %13 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* 
}* %12, i32 0, i32 1 + %15 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 2 + store %Array* %paulis, %Array** %13, align 8 + store double %angle, double* %14, align 8 + store %Array* %qubits, %Array** %15, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %12) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____ctladj(%Array* %__controlQubits__, { double, i64, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 0 + %angle = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2 + %register = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %5 = bitcast i8* %4 to i2* + store i2 0, i2* %5, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %10 = bitcast i8* %9 to %Qubit** + store %Qubit* %8, %Qubit** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Array*, double, %Array* }* + %13 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 
0, i32 2 + store %Array* %paulis, %Array** %13, align 8 + store double %angle, double* %14, align 8 + store %Array* %qubits, %Array** %15, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %12) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____body(double %tolerance, %Array* %disentangling, i2 %axis, { %Range, i64 }* %0, %Array* %register) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %1 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 0 + %rngControl = load %Range, %Range* %1, align 4 + %2 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %2, align 4 + %3 = extractvalue %Range %rngControl, 0 + %4 = extractvalue %Range %rngControl, 1 + %5 = extractvalue %Range %rngControl, 2 + %6 = insertvalue %Range zeroinitializer, i64 %3, 0 + %7 = insertvalue %Range %6, i64 %4, 1 + %8 = insertvalue %Range %7, i64 %5, 2 + %9 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %8, i1 true) + %actualControl = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %9) + %10 = getelementptr inbounds { %Array* }, { %Array* }* %actualControl, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { %Array* }* %actualControl to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %14 = bitcast i8* %13 to %Qubit** + %15 = load %Qubit*, %Qubit** %14, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body(double %tolerance, %Array* %disentangling, i2 %axis, { %Array* }* %actualControl, %Qubit* %15) + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____adj(double %tolerance, %Array* %disentangling, i2 %axis, { %Range, i64 }* %0, 
%Array* %register) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %1 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 0 + %rngControl = load %Range, %Range* %1, align 4 + %2 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %2, align 4 + %3 = extractvalue %Range %rngControl, 0 + %4 = extractvalue %Range %rngControl, 1 + %5 = extractvalue %Range %rngControl, 2 + %6 = insertvalue %Range zeroinitializer, i64 %3, 0 + %7 = insertvalue %Range %6, i64 %4, 1 + %8 = insertvalue %Range %7, i64 %5, 2 + %9 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %8, i1 true) + %__qsVar0__actualControl__ = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %9) + %10 = getelementptr inbounds { %Array* }, { %Array* }* %__qsVar0__actualControl__, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { %Array* }* %__qsVar0__actualControl__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %14 = bitcast i8* %13 to %Qubit** + %15 = load %Qubit*, %Qubit** %14, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj(double %tolerance, %Array* %disentangling, i2 %axis, { %Array* }* %__qsVar0__actualControl__, %Qubit* %15) + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____ctl(%Array* %__controlQubits__, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1 + %disentangling = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1) + %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2 + %axis = load i2, i2* %3, align 1 + %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3 + %5 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8 + %6 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4 + %register = load %Array*, %Array** %6, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %7 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 0 + %rngControl = load %Range, %Range* %7, align 4 + %8 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 1 + %idxTarget = load i64, i64* %8, align 4 + %9 = extractvalue %Range %rngControl, 0 + %10 = extractvalue %Range %rngControl, 1 + %11 = extractvalue %Range %rngControl, 2 + %12 = insertvalue %Range zeroinitializer, i64 %9, 0 + %13 = insertvalue %Range %12, i64 %10, 1 + %14 = insertvalue %Range %13, i64 %11, 2 + %15 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %14, i1 true) + %actualControl = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %15) + %16 = getelementptr inbounds { %Array* }, { %Array* }* %actualControl, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array* }* %actualControl to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 1) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %20 = bitcast i8* %19 to %Qubit** + %21 = load %Qubit*, %Qubit** %20, align 8 + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %23 = bitcast %Tuple* %22 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %24 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 0 + %25 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 1 + %26 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 2 + %27 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 3 + %28 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 4 + store double %tolerance, double* %24, align 8 + store %Array* %disentangling, %Array** %25, align 8 + store i2 %axis, i2* %26, align 1 + store { %Array* }* %actualControl, { %Array* }** %27, align 8 + store %Qubit* %21, %Qubit** %28, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____ctladj(%Array* %__controlQubits__, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1 + %disentangling = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1) + %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2 + %axis = load i2, i2* %3, align 1 + %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3 + %5 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8 + %6 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4 + %register = load %Array*, %Array** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %7 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 0 + %rngControl = load %Range, %Range* %7, align 4 + %8 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 1 + %idxTarget = load i64, i64* %8, align 4 + %9 = extractvalue %Range %rngControl, 0 + %10 = extractvalue %Range %rngControl, 1 + %11 = extractvalue %Range %rngControl, 2 + %12 = insertvalue %Range zeroinitializer, i64 %9, 0 + %13 = insertvalue %Range %12, i64 %10, 1 + %14 = insertvalue %Range %13, i64 %11, 2 + %15 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %14, i1 true) + %__qsVar0__actualControl__ = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %15) + %16 = getelementptr inbounds { %Array* }, { %Array* }* %__qsVar0__actualControl__, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array* }* %__qsVar0__actualControl__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 1) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %20 = bitcast i8* %19 to %Qubit** + %21 = load %Qubit*, %Qubit** %20, align 8 + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, 
%Qubit* }* null, i32 1) to i64)) + %23 = bitcast %Tuple* %22 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %24 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 0 + %25 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 1 + %26 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 2 + %27 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 3 + %28 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 4 + store double %tolerance, double* %24, align 8 + store %Array* %disentangling, %Array** %25, align 8 + store i2 %axis, i2* %26, align 1 + store { %Array* }* %__qsVar0__actualControl__, { %Array* }** %27, align 8 + store %Qubit* %21, %Qubit** %28, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____body(%Callable* %bareOp, { %Array* }* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %register to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array* }* + %5 = getelementptr inbounds { %Array* }, { %Array* }* %4, i32 0, i32 0 + store %Array* %1, %Array** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %bareOp, %Tuple* %3, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + 
call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____adj(%Callable* %bareOp, { %Array* }* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %register to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Callable* @__quantum__rt__callable_copy(%Callable* %bareOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %3) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + store %Array* %1, %Array** %6, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %3, %Tuple* %4, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____ctl(%Array* %__controlQubits__, { %Callable*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0 + %bareOp = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1) + %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1 + %register = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %register to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %bareOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { 
%Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %9, align 8 + store %Array* %4, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____ctladj(%Array* %__controlQubits__, { %Callable*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0 + %bareOp = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1) + %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1 + %register = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %register to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %bareOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %9, align 8 + store %Array* %4, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Preparation____QsRef0__ApproximatelyUnprepareArbitraryStatePlan____body(double %tolerance, %Array* %coefficients, { %Range, i64 }* %0) { +entry: + %plan = alloca %Array*, align 8 + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %3) + %6 = bitcast i8* %5 to { double, double }** + %7 = load { double, double }*, { double, double }** %6, align 8 + %8 = bitcast { double, double }* %7 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %10 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 0 + %rngControl = load %Range, %Range* %10, align 4 + %11 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %11, align 4 + %12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + store %Array* %12, %Array** %plan, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = call { %Array*, %Array*, %Array* }* @Microsoft__Quantum__Preparation____QsRef0__StatePreparationSBMComputeCoefficients____body(%Array* %coefficients) + %14 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %13, i32 0, i32 0 + %disentanglingY = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingY, i32 1) + %15 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %13, i32 0, i32 1 + %disentanglingZ = load %Array*, %Array** %15, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingZ, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %13, i32 0, i32 2 + %newCoefficients = load %Array*, %Array** %16, align 8 + %17 = call i64 @__quantum__rt__array_get_size_1d(%Array* %newCoefficients) + %18 = sub i64 %17, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %19 = phi i64 [ 0, %exit__1 ], [ %25, %exiting__2 ] + %20 = icmp sle i64 %19, %18 + br i1 %20, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 %19) + %22 = bitcast i8* %21 to { double, double }** + %23 = load { double, double }*, { double, double }** %22, align 8 + %24 = bitcast { 
double, double }* %23 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %25 = add i64 %19, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %newCoefficients, i32 1) + %26 = call i1 @Microsoft__Quantum__Canon____QsRef0__AnyOutsideToleranceD____body(double %tolerance, %Array* %disentanglingZ) + br i1 %26, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__2 + %27 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingZ, i32 1) + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, %Range, i64 }* getelementptr ({ %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { %Callable*, double, %Array*, i2, %Range, i64 }* + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 1 + %32 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 2 + %33 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 3 + %34 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 4 + %35 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 5 + store %Callable* %27, %Callable** %30, align 8 + store double %tolerance, double* %31, align 8 + store %Array* %disentanglingZ, %Array** %32, align 8 + store i2 -2, i2* %33, align 1 + store %Range %rngControl, %Range* %34, align 4 + store i64 %idxTarget, i64* %35, align 4 + %36 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__28__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__17__FunctionTable, %Tuple* %28) + %37 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 0) + %39 = bitcast i8* %38 to %Callable** + store %Callable* %36, %Callable** %39, align 8 + %40 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %40, i64 0) + %42 = bitcast i8* %41 to %Callable** + store %Callable* %36, %Callable** %42, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 1) + br label %header__3 + +continue__1: ; preds = %exit__4, %exit__2 + %43 = call i1 @Microsoft__Quantum__Canon____QsRef0__AnyOutsideToleranceD____body(double %tolerance, %Array* %disentanglingY) + br i1 %43, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + %44 = load %Array*, %Array** %plan, align 8 + %45 = call %Callable* 
@__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingY, i32 1) + %46 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, %Range, i64 }* getelementptr ({ %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* null, i32 1) to i64)) + %47 = bitcast %Tuple* %46 to { %Callable*, double, %Array*, i2, %Range, i64 }* + %48 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 0 + %49 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 1 + %50 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 2 + %51 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 3 + %52 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 4 + %53 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 5 + store %Callable* %45, %Callable** %48, align 8 + store double %tolerance, double* %49, align 8 + store %Array* %disentanglingY, %Array** %50, align 8 + store i2 -1, i2* %51, align 1 + store %Range %rngControl, %Range* %52, align 4 + store i64 %idxTarget, i64* %53, align 4 + %54 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__29__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__17__FunctionTable, %Tuple* %46) + %55 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 0) + %57 = bitcast i8* %56 to %Callable** + store %Callable* %54, %Callable** %57, align 8 + %58 = call %Array* @__quantum__rt__array_concatenate(%Array* %44, %Array* %55) + %59 = call i64 @__quantum__rt__array_get_size_1d(%Array* %58) + %60 = sub i64 %59, 1 + br label %header__5 + +continue__2: ; preds = %exit__9, %continue__1 + %61 = call i1 @Microsoft__Quantum__Canon__IsRangeEmpty__body(%Range %rngControl) + br i1 %61, label %then0__3, label %test1__1 + +then0__3: ; preds = %continue__2 + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 0) + %63 = bitcast i8* %62 to { double, double }** + %64 = load { double, double }*, { double, double }** %63, align 8 + %65 = getelementptr inbounds { double, double }, { double, double }* %64, i32 0, i32 0 + %abs = load double, double* %65, align 8 + %66 = getelementptr inbounds { double, double }, { double, double }* %64, i32 0, i32 1 + %arg = load double, double* %66, align 8 + %67 = call double @Microsoft__Quantum__Math__AbsD__body(double %arg) + %68 = fcmp ogt double %67, %tolerance + br i1 %68, label %then0__4, label %continue__4 + +then0__4: ; preds = %then0__3 + %69 = load %Array*, %Array** %plan, align 8 + %70 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____FunctionTable, [2 x void 
(%Tuple*, i32)*]* null, %Tuple* null) + %71 = fmul double -1.000000e+00, %arg + %72 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, i64 }* getelementptr ({ %Callable*, double, i64 }, { %Callable*, double, i64 }* null, i32 1) to i64)) + %73 = bitcast %Tuple* %72 to { %Callable*, double, i64 }* + %74 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %73, i32 0, i32 0 + %75 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %73, i32 0, i32 1 + %76 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %73, i32 0, i32 2 + store %Callable* %70, %Callable** %74, align 8 + store double %71, double* %75, align 8 + store i64 %idxTarget, i64* %76, align 4 + %77 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__30__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__18__FunctionTable, %Tuple* %72) + %78 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 0) + %80 = bitcast i8* %79 to %Callable** + store %Callable* %77, %Callable** %80, align 8 + %81 = call %Array* @__quantum__rt__array_concatenate(%Array* %69, %Array* %78) + %82 = call i64 @__quantum__rt__array_get_size_1d(%Array* %81) + %83 = sub i64 %82, 1 + br label %header__10 + +continue__4: ; preds = %exit__14, %then0__3 + br label %continue__3 + +test1__1: ; preds = %continue__2 + %84 = call i1 @Microsoft__Quantum__Canon____QsRef0__AnyOutsideToleranceCP____body(double %tolerance, %Array* %newCoefficients) + br i1 %84, label %then1__1, label %continue__3 + +then1__1: ; preds = %test1__1 + %85 = extractvalue %Range %rngControl, 0 + %86 = extractvalue %Range %rngControl, 1 + %87 = extractvalue %Range %rngControl, 2 + %88 = add i64 %85, 1 + %89 = extractvalue %Range %rngControl, 0 + %90 = extractvalue %Range %rngControl, 1 + %91 = extractvalue %Range %rngControl, 2 + %92 = extractvalue %Range %rngControl, 0 + %93 = extractvalue %Range %rngControl, 1 + %94 = extractvalue %Range %rngControl, 2 + %95 = insertvalue %Range zeroinitializer, i64 %88, 0 + %96 = insertvalue %Range %95, i64 %90, 1 + %newControl = insertvalue %Range %96, i64 %94, 2 + %newTarget = extractvalue %Range %rngControl, 0 + %97 = extractvalue %Range %rngControl, 1 + %98 = extractvalue %Range %rngControl, 2 + %99 = load %Array*, %Array** %plan, align 8 + %100 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %101 = bitcast %Tuple* %100 to { %Range, i64 }* + %102 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %101, i32 0, i32 0 + %103 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %101, i32 0, i32 1 + store %Range %newControl, %Range* %102, align 4 + store i64 %newTarget, i64* %103, align 4 + %104 = call %Array* @Microsoft__Quantum__Preparation____QsRef0__ApproximatelyUnprepareArbitraryStatePlan____body(double %tolerance, %Array* %newCoefficients, { %Range, i64 }* %101) + %105 = call %Array* @__quantum__rt__array_concatenate(%Array* %99, %Array* %104) + %106 = call i64 @__quantum__rt__array_get_size_1d(%Array* %105) + %107 = sub i64 %106, 1 + br label %header__15 + +continue__3: ; preds = %exit__19, %test1__1, %continue__4 + %108 = load %Array*, %Array** %plan, align 8 + %109 = sub i64 %1, 1 + br label %header__20 + +header__3: ; preds = %exiting__3, %then0__1 + %110 = phi i64 [ 0, 
%then0__1 ], [ %115, %exiting__3 ] + %111 = icmp sle i64 %110, 0 + br i1 %111, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %40, i64 %110) + %113 = bitcast i8* %112 to %Callable** + %114 = load %Callable*, %Callable** %113, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %114, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %114, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %115 = add i64 %110, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %40, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + store %Array* %40, %Array** %plan, align 8 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %116 = phi i64 [ 0, %exit__3 ], [ %121, %exiting__4 ] + %117 = icmp sle i64 %116, 0 + br i1 %117, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 %116) + %119 = bitcast i8* %118 to %Callable** + %120 = load %Callable*, %Callable** %119, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %120, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %120, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %121 = add i64 %116, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 -1) + br label %continue__1 + +header__5: ; preds = %exiting__5, %then0__2 + %122 = phi i64 [ 0, %then0__2 ], [ %127, %exiting__5 ] + %123 = icmp sle i64 %122, %60 + br i1 %123, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %124 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %58, i64 %122) + %125 = bitcast i8* %124 to %Callable** + %126 = load %Callable*, %Callable** %125, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %126, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %126, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %127 = add i64 %122, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %58, i32 1) + %128 = sub i64 %59, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %129 = phi i64 [ 0, %exit__5 ], [ %134, %exiting__6 ] + %130 = icmp sle i64 %129, %128 + br i1 %130, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %58, i64 %129) + %132 = bitcast i8* %131 to %Callable** + %133 = load %Callable*, %Callable** %132, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %133, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %133, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %134 = add i64 %129, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 1) + %135 = call i64 @__quantum__rt__array_get_size_1d(%Array* %44) + %136 = sub i64 %135, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %137 = phi i64 [ 0, %exit__6 ], [ %142, %exiting__7 ] + %138 = icmp sle i64 %137, %136 + br i1 %138, label %body__7, label 
%exit__7 + +body__7: ; preds = %header__7 + %139 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 %137) + %140 = bitcast i8* %139 to %Callable** + %141 = load %Callable*, %Callable** %140, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %141, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %141, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %142 = add i64 %137, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 -1) + %143 = sub i64 %135, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %144 = phi i64 [ 0, %exit__7 ], [ %149, %exiting__8 ] + %145 = icmp sle i64 %144, %143 + br i1 %145, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 %144) + %147 = bitcast i8* %146 to %Callable** + %148 = load %Callable*, %Callable** %147, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %148, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %148, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %149 = add i64 %144, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 -1) + store %Array* %58, %Array** %plan, align 8 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %150 = phi i64 [ 0, %exit__8 ], [ %155, %exiting__9 ] + %151 = icmp sle i64 %150, 0 + br i1 %151, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 %150) + %153 = bitcast i8* %152 to %Callable** + %154 = load %Callable*, %Callable** %153, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %154, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %154, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %155 = add i64 %150, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_reference_count(%Array* %55, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %58, i32 -1) + br label %continue__2 + +header__10: ; preds = %exiting__10, %then0__4 + %156 = phi i64 [ 0, %then0__4 ], [ %161, %exiting__10 ] + %157 = icmp sle i64 %156, %83 + br i1 %157, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %158 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %81, i64 %156) + %159 = bitcast i8* %158 to %Callable** + %160 = load %Callable*, %Callable** %159, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %160, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %160, i32 1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %161 = add i64 %156, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 1) + %162 = sub i64 %82, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %163 = phi i64 [ 0, %exit__10 ], [ %168, %exiting__11 ] + %164 = icmp sle i64 %163, %162 + br i1 %164, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %165 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %81, i64 %163) + %166 = bitcast i8* %165 to %Callable** + %167 = load %Callable*, 
%Callable** %166, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %167, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %167, i32 1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %168 = add i64 %163, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 1) + %169 = call i64 @__quantum__rt__array_get_size_1d(%Array* %69) + %170 = sub i64 %169, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %171 = phi i64 [ 0, %exit__11 ], [ %176, %exiting__12 ] + %172 = icmp sle i64 %171, %170 + br i1 %172, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %173 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 %171) + %174 = bitcast i8* %173 to %Callable** + %175 = load %Callable*, %Callable** %174, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %175, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %175, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %176 = add i64 %171, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %69, i32 -1) + %177 = sub i64 %169, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %178 = phi i64 [ 0, %exit__12 ], [ %183, %exiting__13 ] + %179 = icmp sle i64 %178, %177 + br i1 %179, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %180 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 %178) + %181 = bitcast i8* %180 to %Callable** + %182 = load %Callable*, %Callable** %181, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %182, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %182, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %183 = add i64 %178, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 -1) + store %Array* %81, %Array** %plan, align 8 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %184 = phi i64 [ 0, %exit__13 ], [ %189, %exiting__14 ] + %185 = icmp sle i64 %184, 0 + br i1 %185, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %186 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 %184) + %187 = bitcast i8* %186 to %Callable** + %188 = load %Callable*, %Callable** %187, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %188, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %188, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %189 = add i64 %184, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_reference_count(%Array* %78, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 -1) + br label %continue__4 + +header__15: ; preds = %exiting__15, %then1__1 + %190 = phi i64 [ 0, %then1__1 ], [ %195, %exiting__15 ] + %191 = icmp sle i64 %190, %107 + br i1 %191, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %192 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 %190) + %193 = bitcast i8* %192 to %Callable** + %194 = load %Callable*, %Callable** %193, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %194, 
i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %194, i32 1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %195 = add i64 %190, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_reference_count(%Array* %105, i32 1) + %196 = sub i64 %106, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %197 = phi i64 [ 0, %exit__15 ], [ %202, %exiting__16 ] + %198 = icmp sle i64 %197, %196 + br i1 %198, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %199 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 %197) + %200 = bitcast i8* %199 to %Callable** + %201 = load %Callable*, %Callable** %200, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %201, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %201, i32 1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %202 = add i64 %197, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_alias_count(%Array* %105, i32 1) + %203 = call i64 @__quantum__rt__array_get_size_1d(%Array* %99) + %204 = sub i64 %203, 1 + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %205 = phi i64 [ 0, %exit__16 ], [ %210, %exiting__17 ] + %206 = icmp sle i64 %205, %204 + br i1 %206, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %207 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %99, i64 %205) + %208 = bitcast i8* %207 to %Callable** + %209 = load %Callable*, %Callable** %208, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %209, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %209, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %210 = add i64 %205, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %99, i32 -1) + %211 = sub i64 %203, 1 + br label %header__18 + +header__18: ; preds = %exiting__18, %exit__17 + %212 = phi i64 [ 0, %exit__17 ], [ %217, %exiting__18 ] + %213 = icmp sle i64 %212, %211 + br i1 %213, label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %99, i64 %212) + %215 = bitcast i8* %214 to %Callable** + %216 = load %Callable*, %Callable** %215, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %216, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %216, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = %body__18 + %217 = add i64 %212, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_reference_count(%Array* %99, i32 -1) + store %Array* %105, %Array** %plan, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %100, i32 -1) + %218 = call i64 @__quantum__rt__array_get_size_1d(%Array* %104) + %219 = sub i64 %218, 1 + br label %header__19 + +header__19: ; preds = %exiting__19, %exit__18 + %220 = phi i64 [ 0, %exit__18 ], [ %225, %exiting__19 ] + %221 = icmp sle i64 %220, %219 + br i1 %221, label %body__19, label %exit__19 + +body__19: ; preds = %header__19 + %222 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %104, i64 %220) + %223 = bitcast i8* %222 to %Callable** + %224 = load %Callable*, %Callable** %223, align 8 + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %224, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %224, i32 -1) + br label %exiting__19 + +exiting__19: ; preds = %body__19 + %225 = add i64 %220, 1 + br label %header__19 + +exit__19: ; preds = %header__19 + call void @__quantum__rt__array_update_reference_count(%Array* %104, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %105, i32 -1) + br label %continue__3 + +header__20: ; preds = %exiting__20, %continue__3 + %226 = phi i64 [ 0, %continue__3 ], [ %232, %exiting__20 ] + %227 = icmp sle i64 %226, %109 + br i1 %227, label %body__20, label %exit__20 + +body__20: ; preds = %header__20 + %228 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %226) + %229 = bitcast i8* %228 to { double, double }** + %230 = load { double, double }*, { double, double }** %229, align 8 + %231 = bitcast { double, double }* %230 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %231, i32 -1) + br label %exiting__20 + +exiting__20: ; preds = %body__20 + %232 = add i64 %226, 1 + br label %header__20 + +exit__20: ; preds = %header__20 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + %233 = call i64 @__quantum__rt__array_get_size_1d(%Array* %108) + %234 = sub i64 %233, 1 + br label %header__21 + +header__21: ; preds = %exiting__21, %exit__20 + %235 = phi i64 [ 0, %exit__20 ], [ %240, %exiting__21 ] + %236 = icmp sle i64 %235, %234 + br i1 %236, label %body__21, label %exit__21 + +body__21: ; preds = %header__21 + %237 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %108, i64 %235) + %238 = bitcast i8* %237 to %Callable** + %239 = load %Callable*, %Callable** %238, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %239, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %239, i32 -1) + br label %exiting__21 + +exiting__21: ; preds = %body__21 + %240 = add i64 %235, 1 + br label %header__21 + +exit__21: ; preds = %header__21 + call void @__quantum__rt__array_update_alias_count(%Array* %108, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingY, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingZ, i32 -1) + %241 = sub i64 %17, 1 + br label %header__22 + +header__22: ; preds = %exiting__22, %exit__21 + %242 = phi i64 [ 0, %exit__21 ], [ %248, %exiting__22 ] + %243 = icmp sle i64 %242, %241 + br i1 %243, label %body__22, label %exit__22 + +body__22: ; preds = %header__22 + %244 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 %242) + %245 = bitcast i8* %244 to { double, double }** + %246 = load { double, double }*, { double, double }** %245, align 8 + %247 = bitcast { double, double }* %246 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %247, i32 -1) + br label %exiting__22 + +exiting__22: ; preds = %body__22 + %248 = add i64 %242, 1 + br label %header__22 + +exit__22: ; preds = %header__22 + call void @__quantum__rt__array_update_alias_count(%Array* %newCoefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingY, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingZ, i32 -1) + %249 = sub i64 %17, 1 + br label %header__23 + +header__23: ; preds = %exiting__23, %exit__22 + %250 = phi i64 [ 0, %exit__22 ], [ %256, %exiting__23 ] + %251 = icmp sle i64 %250, %249 + br 
i1 %251, label %body__23, label %exit__23 + +body__23: ; preds = %header__23 + %252 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 %250) + %253 = bitcast i8* %252 to { double, double }** + %254 = load { double, double }*, { double, double }** %253, align 8 + %255 = bitcast { double, double }* %254 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %255, i32 -1) + br label %exiting__23 + +exiting__23: ; preds = %body__23 + %256 = add i64 %250, 1 + br label %header__23 + +exit__23: ; preds = %header__23 + call void @__quantum__rt__array_update_reference_count(%Array* %newCoefficients, i32 -1) + %257 = bitcast { %Array*, %Array*, %Array* }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %257, i32 -1) + ret %Array* %108 +} + +define internal { %Array*, %Array*, %Array* }* @Microsoft__Quantum__Preparation____QsRef0__StatePreparationSBMComputeCoefficients____body(%Array* %coefficients) { +entry: + %newCoefficients = alloca %Array*, align 8 + %disentanglingY = alloca %Array*, align 8 + %disentanglingZ = alloca %Array*, align 8 + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = sdiv i64 %0, 2 + %10 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %9) + %11 = sub i64 %9, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %16, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %12) + %15 = bitcast i8* %14 to double* + store double 0.000000e+00, double* %15, align 8 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %16 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + store %Array* %10, %Array** %disentanglingZ, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %17 = sdiv i64 %0, 2 + %18 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %17) + %19 = sub i64 %17, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %20 = phi i64 [ 0, %exit__2 ], [ %24, %exiting__3 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %20) + %23 = bitcast i8* %22 to double* + store double 0.000000e+00, double* %23, align 8 + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %24 = add i64 %20, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %18, %Array** %disentanglingY, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + %25 = call { double, double }* 
@Microsoft__Quantum__Math__ComplexPolar__body(double 0.000000e+00, double 0.000000e+00) + %26 = sdiv i64 %0, 2 + %27 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %26) + %28 = sub i64 %26, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %29 = phi i64 [ 0, %exit__3 ], [ %34, %exiting__4 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %27, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + store { double, double }* %25, { double, double }** %32, align 8 + %33 = bitcast { double, double }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %34 = add i64 %29, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + store %Array* %27, %Array** %newCoefficients, align 8 + %35 = sub i64 %26, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %36 = phi i64 [ 0, %exit__4 ], [ %42, %exiting__5 ] + %37 = icmp sle i64 %36, %35 + br i1 %37, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %27, i64 %36) + %39 = bitcast i8* %38 to { double, double }** + %40 = load { double, double }*, { double, double }** %39, align 8 + %41 = bitcast { double, double }* %40 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %41, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %42 = add i64 %36, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 1) + %43 = sub i64 %0, 1 + br label %preheader__1 + +preheader__1: ; preds = %exit__5 + br label %header__6 + +header__6: ; preds = %exiting__6, %preheader__1 + %idxCoeff = phi i64 [ 0, %preheader__1 ], [ %80, %exiting__6 ] + %44 = icmp sle i64 %idxCoeff, %43 + %45 = icmp sge i64 %idxCoeff, %43 + %46 = select i1 true, i1 %44, i1 %45 + br i1 %46, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %idxCoeff) + %48 = bitcast i8* %47 to { double, double }** + %49 = load { double, double }*, { double, double }** %48, align 8 + %50 = add i64 %idxCoeff, 1 + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %50) + %52 = bitcast i8* %51 to { double, double }** + %53 = load { double, double }*, { double, double }** %52, align 8 + %54 = call { { double, double }*, double, double }* @Microsoft__Quantum__Preparation__BlochSphereCoordinates__body({ double, double }* %49, { double, double }* %53) + %55 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %54, i32 0, i32 0 + %rt = load { double, double }*, { double, double }** %55, align 8 + %56 = bitcast { double, double }* %rt to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 1) + %57 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %54, i32 0, i32 1 + %phi = load double, double* %57, align 8 + %58 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %54, i32 0, i32 2 + %theta = load double, double* %58, align 8 + %59 = load %Array*, %Array** %disentanglingZ, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %59, i32 -1) + 
%60 = call %Array* @__quantum__rt__array_copy(%Array* %59, i1 false) + %61 = fmul double 5.000000e-01, %phi + %62 = sdiv i64 %idxCoeff, 2 + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 %62) + %64 = bitcast i8* %63 to double* + store double %61, double* %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %60, i32 1) + store %Array* %60, %Array** %disentanglingZ, align 8 + %65 = load %Array*, %Array** %disentanglingY, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 -1) + %66 = call %Array* @__quantum__rt__array_copy(%Array* %65, i1 false) + %67 = fmul double 5.000000e-01, %theta + %68 = sdiv i64 %idxCoeff, 2 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %66, i64 %68) + %70 = bitcast i8* %69 to double* + %71 = load double, double* %70, align 8 + store double %67, double* %70, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 1) + store %Array* %66, %Array** %disentanglingY, align 8 + %72 = load %Array*, %Array** %newCoefficients, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 -1) + %73 = call %Array* @__quantum__rt__array_copy(%Array* %72, i1 false) + %74 = sdiv i64 %idxCoeff, 2 + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 %74) + %76 = bitcast i8* %75 to { double, double }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 1) + %77 = load { double, double }*, { double, double }** %76, align 8 + %78 = bitcast { double, double }* %77 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %78, i32 -1) + store { double, double }* %rt, { double, double }** %76, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %73, i32 1) + store %Array* %73, %Array** %newCoefficients, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 -1) + %79 = bitcast { { double, double }*, double, double }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %59, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %72, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %80 = add i64 %idxCoeff, 2 + br label %header__6 + +exit__6: ; preds = %header__6 + %81 = load %Array*, %Array** %disentanglingY, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 1) + %82 = load %Array*, %Array** %disentanglingZ, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %82, i32 1) + %83 = load %Array*, %Array** %newCoefficients, align 8 + %84 = call i64 @__quantum__rt__array_get_size_1d(%Array* %83) + %85 = sub i64 %84, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %86 = phi i64 [ 0, %exit__6 ], [ %92, %exiting__7 ] + %87 = icmp sle i64 %86, %85 + br i1 %87, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 %86) + %89 = bitcast i8* %88 to { double, double }** + %90 = load { double, double }*, { double, double }** %89, align 8 + %91 = bitcast { double, double }* %90 to 
%Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %91, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %92 = add i64 %86, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %83, i32 1) + %93 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Array* }* getelementptr ({ %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* null, i32 1) to i64)) + %94 = bitcast %Tuple* %93 to { %Array*, %Array*, %Array* }* + %95 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %94, i32 0, i32 0 + %96 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %94, i32 0, i32 1 + %97 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %94, i32 0, i32 2 + store %Array* %81, %Array** %95, align 8 + store %Array* %82, %Array** %96, align 8 + store %Array* %83, %Array** %97, align 8 + %98 = sub i64 %0, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %99 = phi i64 [ 0, %exit__7 ], [ %105, %exiting__8 ] + %100 = icmp sle i64 %99, %98 + br i1 %100, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %99) + %102 = bitcast i8* %101 to { double, double }** + %103 = load { double, double }*, { double, double }** %102, align 8 + %104 = bitcast { double, double }* %103 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %104, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %105 = add i64 %99, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %82, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 -1) + %106 = sub i64 %84, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %107 = phi i64 [ 0, %exit__8 ], [ %113, %exiting__9 ] + %108 = icmp sle i64 %107, %106 + br i1 %108, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 %107) + %110 = bitcast i8* %109 to { double, double }** + %111 = load { double, double }*, { double, double }** %110, align 8 + %112 = bitcast { double, double }* %111 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %112, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %113 = add i64 %107, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %83, i32 -1) + %114 = bitcast { double, double }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %114, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %82, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 -1) + %115 = sub i64 %84, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %116 = phi i64 [ 0, %exit__9 ], [ %122, %exiting__10 ] + %117 = icmp sle i64 %116, %115 + br i1 %117, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 %116) + %119 = bitcast i8* %118 to { double, double }** + %120 = load { double, double }*, { double, double }** %119, align 8 + %121 = bitcast { 
double, double }* %120 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %121, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %122 = add i64 %116, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_reference_count(%Array* %83, i32 -1) + ret { %Array*, %Array*, %Array* }* %94 +} + +define internal void @Lifted__PartialApplication__28__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %8 = load %Range, %Range* %7, align 4 + %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5 + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Range, i64 }* + %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1 + store %Range %8, %Range* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = bitcast %Tuple* %arg-tuple to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4 + store double %2, double* %20, align 8 + store %Array* %4, %Array** %21, align 8 + store i2 %6, i2* %22, align 1 + store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8 + store %Array* %17, %Array** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 
}, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__28__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %8 = load %Range, %Range* %7, align 4 + %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5 + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Range, i64 }* + %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1 + store %Range %8, %Range* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = bitcast %Tuple* %arg-tuple to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4 + store double %2, double* %20, align 8 + store %Array* %4, %Array** %21, align 8 + store i2 %6, i2* %22, align 1 + store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8 + store %Array* %17, %Array** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, 
%Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__28__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4 + %13 = load %Range, %Range* %12, align 4 + %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5 + %15 = load i64, i64* %14, align 4 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Range, i64 }* + %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1 + store %Range %13, %Range* %18, align 4 + store i64 %15, i64* %19, align 4 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr 
inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3 + %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4 + store double %7, double* %22, align 8 + store %Array* %9, %Array** %23, align 8 + store i2 %11, i2* %24, align 1 + store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8 + store %Array* %4, %Array** %26, align 8 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1 + store %Array* %3, %Array** %29, align 8 + store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0 + %32 = load %Callable*, %Callable** %31, align 8 + %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %33) + call void @__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__28__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, 
%Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4 + %13 = load %Range, %Range* %12, align 4 + %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5 + %15 = load i64, i64* %14, align 4 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Range, i64 }* + %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1 + store %Range %13, %Range* %18, align 4 + store i64 %15, i64* %19, align 4 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3 + %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4 + store double %7, double* %22, align 8 + store %Array* %9, %Array** %23, align 8 + store i2 %11, i2* %24, align 1 + store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8 + store %Array* %4, %Array** %26, align 8 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1 + store %Array* %3, %Array** %29, align 8 + store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0 + %32 = load %Callable*, %Callable** %31, align 8 + %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %33) + call void @__quantum__rt__callable_make_controlled(%Callable* %33) + call void 
@__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load %Array*, %Array** %2, align 8 + %8 = load i2, i2* %3, align 1 + %9 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8 + %10 = load %Array*, %Array** %5, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____body(double %6, %Array* %7, i2 %8, { %Range, i64 }* %9, %Array* %10) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load %Array*, %Array** %2, align 8 + %8 = load i2, i2* %3, align 1 + %9 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8 + %10 = load %Array*, %Array** %5, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____adj(double %6, %Array* %7, i2 %8, { %Range, i64 }* %9, %Array* %10) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, 
{ %Range, i64 }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, i2, { %Range, i64 }*, %Array* }*, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____ctl(%Array* %3, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, i2, { %Range, i64 }*, %Array* }*, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____ctladj(%Array* %3, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__17__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %6 = load %Range, %Range* %5, align 4 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__17__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, 
%Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %6 = load %Range, %Range* %5, align 4 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__29__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %8 = load %Range, %Range* %7, align 4 + %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5 + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Range, i64 }* + %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1 + store %Range %8, %Range* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = bitcast %Tuple* %arg-tuple to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4 + store double %2, double* %20, align 8 + store %Array* %4, %Array** %21, align 8 + store i2 %6, i2* %22, align 1 + store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8 + store %Array* %17, %Array** 
%24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__29__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %8 = load %Range, %Range* %7, align 4 + %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5 + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Range, i64 }* + %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1 + store %Range %8, %Range* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = bitcast %Tuple* %arg-tuple to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4 + store double %2, double* %20, align 8 + store %Array* %4, %Array** %21, align 8 + store i2 %6, i2* %22, align 1 + store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8 + store %Array* %17, 
%Array** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__29__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4 + %13 = load %Range, %Range* %12, align 4 + %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5 + %15 = load i64, i64* %14, align 4 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Range, i64 }* + %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1 + store %Range %13, %Range* %18, align 4 + store i64 %15, i64* %19, align 4 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { 
double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3 + %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4 + store double %7, double* %22, align 8 + store %Array* %9, %Array** %23, align 8 + store i2 %11, i2* %24, align 1 + store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8 + store %Array* %4, %Array** %26, align 8 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1 + store %Array* %3, %Array** %29, align 8 + store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0 + %32 = load %Callable*, %Callable** %31, align 8 + %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %33) + call void @__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__29__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3 + 
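; The remaining captured fields (the i2 Pauli axis, the control %Range and the target index) are loaded next and packed into the nested argument tuple; the captured callable is then copied and given the Adjoint and Controlled functors before being invoked with the control register prepended. +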
%11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4 + %13 = load %Range, %Range* %12, align 4 + %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5 + %15 = load i64, i64* %14, align 4 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Range, i64 }* + %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1 + store %Range %13, %Range* %18, align 4 + store i64 %15, i64* %19, align 4 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3 + %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4 + store double %7, double* %22, align 8 + store %Array* %9, %Array** %23, align 8 + store i2 %11, i2* %24, align 1 + store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8 + store %Array* %4, %Array** %26, align 8 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1 + store %Array* %3, %Array** %29, align 8 + store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0 + %32 = load %Callable*, %Callable** %31, align 8 + %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %33) + call void 
@__quantum__rt__callable_make_controlled(%Callable* %33) + call void @__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__30__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, i64, %Array* }* + %10 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 2 + store double %2, double* %10, align 8 + store i64 %4, i64* %11, align 4 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__30__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, i64, %Array* }* + %10 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 2 + store double %2, double* %10, align 8 + store i64 %4, i64* %11, align 4 + store %Array* %7, %Array** %12, align 8 + %13 = 
getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__30__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %6 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, i64, %Array* }* + %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 2 + store double %7, double* %12, align 8 + store i64 %9, i64* %13, align 4 + store %Array* %4, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, i64, %Array* }* }* getelementptr ({ %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { double, i64, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { double, i64, %Array* }* %11, { double, i64, %Array* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__30__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %6 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, i64, %Array* }* + %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 2 + store double %7, double* %12, align 8 + store i64 %9, i64* %13, align 4 + store %Array* %4, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, i64, %Array* }* }* getelementptr ({ %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { double, i64, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { double, i64, %Array* }* %11, { double, i64, %Array* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64, %Array* }* + %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, 
i32 0 + %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2 + %4 = load double, double* %1, align 8 + %5 = load i64, i64* %2, align 4 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____body(double %4, i64 %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64, %Array* }* + %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2 + %4 = load double, double* %1, align 8 + %5 = load i64, i64* %2, align 4 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____adj(double %4, i64 %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, i64, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, i64, %Array* }*, { double, i64, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____ctl(%Array* %3, { double, i64, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, i64, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, i64, %Array* }*, { double, i64, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____ctladj(%Array* %3, { double, i64, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__18__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__18__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: 
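+; Alias-count hook for the { %Callable*, double, i64 } capture tuple: forwards %count-change to the captured callable and to the capture tuple itself, mirroring MemoryManagement__18__RefCount above.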
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal { { double, double }*, double, double }* @Microsoft__Quantum__Preparation__BlochSphereCoordinates__body({ double, double }* %a0, { double, double }* %a1) { +entry: + %0 = bitcast { double, double }* %a0 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = bitcast { double, double }* %a1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %abs0 = call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %a0) + %abs1 = call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %a1) + %arg0 = call double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* %a0) + %arg1 = call double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* %a1) + %2 = fmul double %abs0, %abs0 + %3 = fmul double %abs1, %abs1 + %d = fadd double %2, %3 + %r = call double @__quantum__qis__sqrt__body(double %d) + %4 = fadd double %arg0, %arg1 + %t = fmul double 5.000000e-01, %4 + %phi = fsub double %arg1, %arg0 + %5 = call double @__quantum__qis__arctan2__body(double %abs1, double %abs0) + %theta = fmul double 2.000000e+00, %5 + %6 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %r, double %t) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }*, double, double }* getelementptr ({ { double, double }*, double, double }, { { double, double }*, double, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { { double, double }*, double, double }* + %9 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %8, i32 0, i32 2 + store { double, double }* %6, { double, double }** %9, align 8 + store double %phi, double* %10, align 8 + store double %theta, double* %11, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + ret { { double, double }*, double, double }* %8 +} + +define internal %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %nQubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 
to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = trunc i64 %nQubits to i32 + %10 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %9) + %11 = fptosi double %10 to i64 + %12 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double 0.000000e+00, double 0.000000e+00) + %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___5ac6d1808c4040b9aa3fa0e6ce75855c_Padded__body(i64 %11, { double, double }* %12, %Array* %coefficients) + %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded) + %14 = sub i64 %13, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 %15) + %18 = bitcast i8* %17 to { double, double }** + %19 = load { double, double }*, { double, double }** %18, align 8 + %20 = bitcast { double, double }* %19 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1) + %22 = icmp sgt i64 %nQubits, 1 + %23 = sub i64 %nQubits, 1 + %24 = insertvalue %Range { i64 1, i64 1, i64 0 }, i64 %23, 2 + %rngControl = select i1 %22, %Range %24, %Range { i64 1, i64 1, i64 0 } + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { %Range, i64 }* + %27 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %26, i32 0, i32 0 + %28 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %26, i32 0, i32 1 + store %Range %rngControl, %Range* %27, align 4 + store i64 0, i64* %28, align 4 + %plan = call %Array* @Microsoft__Quantum__Preparation____QsRef0__ApproximatelyUnprepareArbitraryStatePlan____body(double %tolerance, %Array* %coefficientsPadded, { %Range, i64 }* %26) + %29 = call i64 @__quantum__rt__array_get_size_1d(%Array* %plan) + %30 = sub i64 %29, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %31 = phi i64 [ 0, %exit__2 ], [ %36, %exiting__3 ] + %32 = icmp sle i64 %31, %30 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %plan, i64 %31) + %34 = bitcast i8* %33 to %Callable** + %35 = load %Callable*, %Callable** %34, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %35, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %35, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %36 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %plan, i32 1) + %unprepare = call %Callable* @Microsoft__Quantum__Canon___55c7b8d161af40c49ac844f8a0630208_BoundCA__body(%Array* %plan) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unprepare, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %unprepare, i32 1) + %37 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %38 = call %Callable* @__quantum__rt__callable_copy(%Callable* %unprepare, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %38, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %38) + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable* }, { %Callable*, %Callable* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Callable*, %Callable* }* + %41 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %40, i32 0, i32 0 + %42 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %40, i32 0, i32 1 + store %Callable* %37, %Callable** %41, align 8 + store %Callable* %38, %Callable** %42, align 8 + %43 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__31__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__19__FunctionTable, %Tuple* %39) + %44 = sub i64 %0, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %45 = phi i64 [ 0, %exit__3 ], [ %51, %exiting__4 ] + %46 = icmp sle i64 %45, %44 + br i1 %46, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %45) + %48 = bitcast i8* %47 to { double, double }** + %49 = load { double, double }*, { double, double }** %48, align 8 + %50 = bitcast { double, double }* %49 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %50, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %51 = add i64 %45, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + %52 = sub i64 %13, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %53 = phi i64 [ 0, %exit__4 ], [ %59, %exiting__5 ] + %54 = icmp sle i64 %53, %52 + br i1 %54, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 %53) + %56 = bitcast i8* %55 to { double, double }** + %57 = load { double, double }*, { double, double }** %56, align 8 + %58 = bitcast { double, double }* %57 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %58, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %59 = add i64 %53, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1) + %60 = sub i64 %29, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %61 = phi i64 [ 0, %exit__5 ], [ %66, %exiting__6 ] + %62 = icmp sle i64 %61, %60 + br i1 %62, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %plan, i64 %61) + %64 = bitcast i8* %63 to %Callable** + %65 = load %Callable*, %Callable** %64, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %65, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %65, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = 
%body__6 + %66 = add i64 %61, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %plan, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unprepare, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unprepare, i32 -1) + %67 = bitcast { double, double }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %67, i32 -1) + %68 = sub i64 %13, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %69 = phi i64 [ 0, %exit__6 ], [ %75, %exiting__7 ] + %70 = icmp sle i64 %69, %68 + br i1 %70, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %71 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 %69) + %72 = bitcast i8* %71 to { double, double }** + %73 = load { double, double }*, { double, double }** %72, align 8 + %74 = bitcast { double, double }* %73 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %74, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %75 = add i64 %69, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + %76 = sub i64 %29, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %77 = phi i64 [ 0, %exit__7 ], [ %82, %exiting__8 ] + %78 = icmp sle i64 %77, %76 + br i1 %78, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %plan, i64 %77) + %80 = bitcast i8* %79 to %Callable** + %81 = load %Callable*, %Callable** %80, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %81, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %81, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %82 = add i64 %77, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %plan, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %unprepare, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %unprepare, i32 -1) + ret %Callable* %43 +} + +define internal void @Lifted__PartialApplication__31__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Callable*, { %Array* }* }* + %5 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 1 + store %Callable* %2, %Callable** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + call void 
@__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__31__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Callable*, { %Array* }* }* + %5 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 1 + store %Callable* %2, %Callable** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Callable* @__quantum__rt__callable_copy(%Callable* %9, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %10) + call void @__quantum__rt__callable_invoke(%Callable* %10, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__31__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, { %Array* }* }* + %10 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store { %Array* }* %4, { %Array* }** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, { %Array* }* }* }* getelementptr ({ %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Callable*, { %Array* }* }* }* + %14 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* 
}, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Callable*, { %Array* }* }* %9, { %Callable*, { %Array* }* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__31__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, { %Array* }* }* + %10 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store { %Array* }* %4, { %Array* }** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, { %Array* }* }* }* getelementptr ({ %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Callable*, { %Array* }* }* }* + %14 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Callable*, { %Array* }* }* %9, { %Callable*, { %Array* }* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void 
@__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Callable*, %Callable** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____body(%Callable* %3, { %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Callable*, %Callable** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____adj(%Callable* %3, { %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, { %Array* }* }* }* + %1 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Callable*, { %Array* }* }*, { %Callable*, { %Array* }* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____ctl(%Array* %3, { %Callable*, { %Array* }* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, { %Array* }* }* }* + %1 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Callable*, { %Array* }* }*, { %Callable*, { %Array* }* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____ctladj(%Array* %3, { %Callable*, { %Array* }* }* %4) + ret void +} + +define internal void @MemoryManagement__19__RefCount(%Tuple* 
%capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__19__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__body(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = call i64 @__quantum__rt__array_get_size_1d(%Array* %10) + %13 = call %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %12) + call void @__quantum__rt__callable_invoke(%Callable* %13, %Tuple* %11, %Tuple* null) + %14 = sub i64 %0, 1 + br label %header__2 + 
+header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %15) + %18 = bitcast i8* %17 to { double, double }** + %19 = load { double, double }*, { double, double }** %18, align 8 + %20 = bitcast { double, double }* %19 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__adj(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = call i64 @__quantum__rt__array_get_size_1d(%Array* %10) + %13 = call %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %12) + %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %13, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %14) + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %11, %Tuple* null) + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %16) + %19 = bitcast i8* %18 to { double, double }** + %20 = load { double, double }*, { double, double }** %19, align 8 + %21 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1) + br label 
%exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %5) + %8 = bitcast i8* %7 to { double, double }** + %9 = load { double, double }*, { double, double }** %8, align 8 + %10 = bitcast { double, double }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %12 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %12, align 8 + %13 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %14 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + %15 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %14) + %17 = call %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %16) + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 1) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %20 = bitcast 
%Tuple* %19 to { %Array*, { %Array* }* }* + %21 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %20, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %21, align 8 + store { %Array* }* %qubits, { %Array* }** %22, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %19, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %23 = sub i64 %3, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %24 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %25 = icmp sle i64 %24, %23 + br i1 %25, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %24) + %27 = bitcast i8* %26 to { double, double }** + %28 = load { double, double }*, { double, double }** %27, align 8 + %29 = bitcast { double, double }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %24, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %5) + %8 = bitcast i8* %7 to { double, double }** + %9 = load { double, double }*, { double, double }** %8, align 8 + %10 = bitcast { double, double }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void 
@__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %12 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %12, align 8 + %13 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %14 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + %15 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %14) + %17 = call %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %16) + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 1) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { %Array*, { %Array* }* }* + %21 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %20, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %21, align 8 + store { %Array* }* %qubits, { %Array* }** %22, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %19, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %23 = sub i64 %3, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %24 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %25 = icmp sle i64 %24, %23 + br i1 %25, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %24) + %27 = bitcast i8* %26 to { double, double }** + %28 = load { double, double }*, { double, double }** %27, align 8 + %29 = bitcast { double, double }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %24, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__body(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__ComplexPolar__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + store %Callable* %3, %Callable** %6, align 8 + store double 0.000000e+00, double* %7, align 8 + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__32__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__20__FunctionTable, %Tuple* %4) + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__AbsD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %10 = call %Callable* @Microsoft__Quantum__Canon___aa681116ffc3482eb00c223eb7ada15f_Compose__body(%Callable* %8, %Callable* %9) + %coefficientsAsComplexPolar = call %Array* @Microsoft__Quantum__Arrays___bce10a946d1b466781aeb2785d88e6e2_Mapped__body(%Callable* %10, %Array* %coefficients) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsAsComplexPolar) + %12 = sub i64 %11, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %13 = phi i64 [ 0, %entry ], [ %19, %exiting__1 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %13, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsAsComplexPolar, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__body(double %tolerance, %Array* %coefficientsAsComplexPolar, { %Array* }* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + %20 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, 
%exit__1 ], [ %27, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %21) + %24 = bitcast i8* %23 to { double, double }** + %25 = load { double, double }*, { double, double }** %24, align 8 + %26 = bitcast { double, double }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsAsComplexPolar, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + %28 = sub i64 %11, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsAsComplexPolar, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__32__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 1 + %5 = load double, double* %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, double }* + %8 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store double %5, double* %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Math__ComplexPolar__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* 
%arg-tuple to { double, double }* + %1 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load double, double* %2, align 8 + %5 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %3, double %4) + %6 = bitcast %Tuple* %result-tuple to { { double, double }* }* + %7 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %6, i32 0, i32 0 + store { double, double }* %5, { double, double }** %7, align 8 + ret void +} + +define internal void @MemoryManagement__20__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__20__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Math__AbsD__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = call double @Microsoft__Quantum__Math__AbsD__body(double %2) + %4 = bitcast %Tuple* %result-tuple to { double }* + %5 = getelementptr inbounds { double }, { double }* %4, i32 0, i32 0 + store double %3, double* %5, align 8 + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__adj(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__ComplexPolar__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, double }, { 
%Callable*, double }* %5, i32 0, i32 1 + store %Callable* %3, %Callable** %6, align 8 + store double 0.000000e+00, double* %7, align 8 + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__33__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__20__FunctionTable, %Tuple* %4) + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__AbsD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %10 = call %Callable* @Microsoft__Quantum__Canon___aa681116ffc3482eb00c223eb7ada15f_Compose__body(%Callable* %8, %Callable* %9) + %__qsVar0__coefficientsAsComplexPolar__ = call %Array* @Microsoft__Quantum__Arrays___bce10a946d1b466781aeb2785d88e6e2_Mapped__body(%Callable* %10, %Array* %coefficients) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__) + %12 = sub i64 %11, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %13 = phi i64 [ 0, %entry ], [ %19, %exiting__1 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %13, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__adj(double %tolerance, %Array* %__qsVar0__coefficientsAsComplexPolar__, { %Array* }* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + %20 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %21) + %24 = bitcast i8* %23 to { double, double }** + %25 = load { double, double }*, { double, double }** %24, align 8 + %26 = bitcast { double, double }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + 
%28 = sub i64 %11, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__33__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 1 + %5 = load double, double* %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, double }* + %8 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store double %5, double* %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__ComplexPolar__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %8 = call 
%Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, double }* + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store double 0.000000e+00, double* %11, align 8 + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__34__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__20__FunctionTable, %Tuple* %8) + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__AbsD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %14 = call %Callable* @Microsoft__Quantum__Canon___aa681116ffc3482eb00c223eb7ada15f_Compose__body(%Callable* %12, %Callable* %13) + %coefficientsAsComplexPolar = call %Array* @Microsoft__Quantum__Arrays___bce10a946d1b466781aeb2785d88e6e2_Mapped__body(%Callable* %14, %Array* %coefficients) + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsAsComplexPolar) + %16 = sub i64 %15, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %17 = phi i64 [ 0, %entry ], [ %23, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %17) + %20 = bitcast i8* %19 to { double, double }** + %21 = load { double, double }*, { double, double }** %20, align 8 + %22 = bitcast { double, double }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %23 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsAsComplexPolar, i32 1) + %24 = sub i64 %15, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %25 = phi i64 [ 0, %exit__1 ], [ %31, %exiting__2 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %25) + %28 = bitcast i8* %27 to { double, double }** + %29 = load { double, double }*, { double, double }** %28, align 8 + %30 = bitcast { double, double }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %31 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsAsComplexPolar, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { double, %Array*, { %Array* }* }* + %34 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { double, %Array*, { %Array* 
}* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 2 + store double %tolerance, double* %34, align 8 + store %Array* %coefficientsAsComplexPolar, %Array** %35, align 8 + store { %Array* }* %qubits, { %Array* }** %36, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %33) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + %37 = sub i64 %15, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %38 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %38) + %41 = bitcast i8* %40 to { double, double }** + %42 = load { double, double }*, { double, double }** %41, align 8 + %43 = bitcast { double, double }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %38, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsAsComplexPolar, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + %45 = sub i64 %15, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %46 = phi i64 [ 0, %exit__3 ], [ %52, %exiting__4 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %46) + %49 = bitcast i8* %48 to { double, double }** + %50 = load { double, double }*, { double, double }** %49, align 8 + %51 = bitcast { double, double }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %51, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %52 = add i64 %46, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsAsComplexPolar, i32 -1) + %53 = sub i64 %15, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %54 = phi i64 [ 0, %exit__4 ], [ %60, %exiting__5 ] + %55 = icmp sle i64 %54, %53 + br i1 %55, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %54) + %57 = bitcast i8* %56 to { double, double }** + %58 = load { double, double }*, { double, double }** %57, align 8 + %59 = bitcast { double, double }* %58 to %Tuple* + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %59, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %60 = add i64 %54, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsAsComplexPolar, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__34__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 1 + %5 = load double, double* %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, double }* + %8 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store double %5, double* %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__ComplexPolar__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, double }* + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 0 + %11 = 
getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store double 0.000000e+00, double* %11, align 8 + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__35__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__20__FunctionTable, %Tuple* %8) + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__AbsD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %14 = call %Callable* @Microsoft__Quantum__Canon___aa681116ffc3482eb00c223eb7ada15f_Compose__body(%Callable* %12, %Callable* %13) + %__qsVar0__coefficientsAsComplexPolar__ = call %Array* @Microsoft__Quantum__Arrays___bce10a946d1b466781aeb2785d88e6e2_Mapped__body(%Callable* %14, %Array* %coefficients) + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__) + %16 = sub i64 %15, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %17 = phi i64 [ 0, %entry ], [ %23, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %17) + %20 = bitcast i8* %19 to { double, double }** + %21 = load { double, double }*, { double, double }** %20, align 8 + %22 = bitcast { double, double }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %23 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 1) + %24 = sub i64 %15, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %25 = phi i64 [ 0, %exit__1 ], [ %31, %exiting__2 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %25) + %28 = bitcast i8* %27 to { double, double }** + %29 = load { double, double }*, { double, double }** %28, align 8 + %30 = bitcast { double, double }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %31 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { double, %Array*, { %Array* }* }* + %34 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 2 + store double %tolerance, double* %34, align 8 + store %Array* 
%__qsVar0__coefficientsAsComplexPolar__, %Array** %35, align 8 + store { %Array* }* %qubits, { %Array* }** %36, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %33) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + %37 = sub i64 %15, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %38 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %38) + %41 = bitcast i8* %40 to { double, double }** + %42 = load { double, double }*, { double, double }** %41, align 8 + %43 = bitcast { double, double }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %38, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + %45 = sub i64 %15, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %46 = phi i64 [ 0, %exit__3 ], [ %52, %exiting__4 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %46) + %49 = bitcast i8* %48 to { double, double }** + %50 = load { double, double }*, { double, double }** %49, align 8 + %51 = bitcast { double, double }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %51, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %52 = add i64 %46, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + %53 = sub i64 %15, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %54 = phi i64 [ 0, %exit__4 ], [ %60, %exiting__5 ] + %55 = icmp sle i64 %54, %53 + br i1 %55, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %54) + %57 = bitcast i8* %56 to { double, double }** + %58 = load { double, double }*, { double, double }** %57, align 8 + %59 = bitcast { double, double }* %58 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %59, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %60 = add i64 %54, 1 + br label %header__5 + +exit__5: ; preds = 
%header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__35__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 1 + %5 = load double, double* %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, double }* + %8 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store double %5, double* %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__body(%Array* %coefficients, { %Array* }* %qubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__body(double 0.000000e+00, %Array* %coefficients, { %Array* }* %qubits) + %12 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double 
}*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__adj(%Array* %coefficients, { %Array* }* %qubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__adj(double 0.000000e+00, %Array* %coefficients, { %Array* }* %qubits) + %12 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__ctl(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %coefficients = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry 
], [ %10, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %4) + %7 = bitcast i8* %6 to { double, double }** + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %11 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %qubits = load { %Array* }*, { %Array* }** %11, align 8 + %12 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %16) + %19 = bitcast i8* %18 to { double, double }** + %20 = load { double, double }*, { double, double }** %19, align 8 + %21 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { double, %Array*, { %Array* }* }* + %25 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 1 + %27 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 2 + store double 0.000000e+00, double* %25, align 8 + store %Array* %coefficients, %Array** %26, align 8 + store { %Array* }* %qubits, { %Array* }** %27, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %24) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %28 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, 
double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + %36 = sub i64 %2, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %37 = phi i64 [ 0, %exit__3 ], [ %43, %exiting__4 ] + %38 = icmp sle i64 %37, %36 + br i1 %38, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %37) + %40 = bitcast i8* %39 to { double, double }** + %41 = load { double, double }*, { double, double }** %40, align 8 + %42 = bitcast { double, double }* %41 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %43 = add i64 %37, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__ctladj(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %coefficients = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %4) + %7 = bitcast i8* %6 to { double, double }** + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %11 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %qubits = load { %Array* }*, { %Array* }** %11, align 8 + %12 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: 
; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %16) + %19 = bitcast i8* %18 to { double, double }** + %20 = load { double, double }*, { double, double }** %19, align 8 + %21 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { double, %Array*, { %Array* }* }* + %25 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 1 + %27 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 2 + store double 0.000000e+00, double* %25, align 8 + store %Array* %coefficients, %Array** %26, align 8 + store { %Array* }* %qubits, { %Array* }** %27, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %24) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %28 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + %36 = sub i64 %2, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %37 = phi i64 [ 0, %exit__3 ], [ %43, %exiting__4 ] + %38 = icmp sle i64 %37, %36 + br i1 %38, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %37) + %40 = bitcast i8* %39 to { double, double }** + %41 = load { double, double }*, { double, double }** %40, align 8 + %42 = bitcast { double, double }* %41 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %43 = add i64 %37, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__body(%Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__body(double 0.000000e+00, %Array* %coefficients, { %Array* }* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__adj(%Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__adj(double 0.000000e+00, %Array* %coefficients, { %Array* }* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__ctl(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %coefficients = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %qubits = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array*, { %Array* 
}* }* + %8 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 1 + %10 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 2 + store double 0.000000e+00, double* %8, align 8 + store %Array* %coefficients, %Array** %9, align 8 + store { %Array* }* %qubits, { %Array* }** %10, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %7) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__ctladj(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %coefficients = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %qubits = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array*, { %Array* }* }* + %8 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 1 + %10 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 2 + store double 0.000000e+00, double* %8, align 8 + store %Array* %coefficients, %Array** %9, align 8 + store { %Array* }* %qubits, { %Array* }** %10, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %7) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %control, %Qubit** %5, align 8 + %__controlQubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %__controlQubits__, %Array* %3) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__1, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* + %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 + store %Qubit* %control, %Qubit** %5, align 8 + store %Qubit* %target, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Exp__body(%Array* %paulis, double %theta, %Array* %qubits) { +entry: + call void 
@__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Exp__adj(%Array* %paulis, double %theta, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 0 + %paulis = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Array*, double, %Array* }* + %6 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 2 + store %Array* %paulis, %Array** %6, align 8 + store double %theta, double* %7, align 8 + store %Array* %qubits, %Array** %8, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* 
}* %0, i32 0, i32 0 + %paulis = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Array*, double, %Array* }* + %6 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 2 + store %Array* %paulis, %Array** %6, align 8 + store double %theta, double* %7, align 8 + store %Array* %qubits, %Array** %8, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +declare void @__quantum__qis__h__body(%Qubit*) + +declare void @__quantum__qis__h__ctl(%Array*, %Qubit*) + +define internal %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { +entry: + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 -2, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %3 = bitcast i8* %2 to %Qubit** + store %Qubit* %qubit, %Qubit** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %4 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + ret %Result* %4 +} + +declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) + +define internal %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) 
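+ ; note: alias counts on %bases and %qubits are raised while they are live across the runtime call below, and lowered again afterwards (QIR copy-on-write alias tracking)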
+ %0 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret %Result* %0 +} + +declare %Result* @__quantum__rt__result_get_one() + +define internal void @Microsoft__Quantum__Intrinsic__ResetAll__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %2) + %5 = bitcast i8* %4 to %Qubit** + %qubit = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %qubit) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +declare void @__quantum__qis__s__body(%Qubit*) + +declare void @__quantum__qis__s__adj(%Qubit*) + +declare void @__quantum__qis__s__ctl(%Array*, %Qubit*) + +declare void @__quantum__qis__s__ctladj(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__y__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__y__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %qubit) 
+ call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQRSTerm____body({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %coeff = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %8 = bitcast i8* %7 to i64* + %p = load i64, i64* %8, align 4 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %10 = bitcast i8* %9 to i64* + %q = load i64, i64* %10, align 4 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 2) + %12 = bitcast i8* %11 to i64* + %r = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %14 = bitcast i8* %13 to i64* + %s = load i64, i64* %14, align 4 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%coeff, i64 0) + %16 = bitcast i8* %15 to double* + %17 = load double, double* %16, align 8 + %18 = fmul double 1.250000e-01, %17 + %angle = fmul double %18, %stepSize + %19 = icmp eq i64 %p, %q + br i1 %19, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %entry + %20 = icmp eq i64 %p, %r + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %entry + %21 = phi i1 [ %19, %entry ], [ %20, %condFalse__1 ] + br i1 %21, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %condContinue__1 + %22 = icmp eq i64 %p, %s + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condContinue__1 + %23 = phi i1 [ %21, %condContinue__1 ], [ %22, %condFalse__2 ] + br i1 %23, label %condContinue__3, label %condFalse__3 + +condFalse__3: ; preds = %condContinue__2 + %24 = icmp eq i64 %q, %r + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condContinue__2 + %25 = phi i1 [ %23, %condContinue__2 ], [ %24, %condFalse__3 ] + br i1 %25, label %condContinue__4, label %condFalse__4 + +condFalse__4: ; preds = %condContinue__3 + %26 = icmp eq i64 %q, %s + br label %condContinue__4 + +condContinue__4: ; preds = %condFalse__4, %condContinue__3 + %27 = phi i1 [ %25, %condContinue__3 ], [ %26, %condFalse__4 ] + br i1 %27, label %condContinue__5, label %condFalse__5 + +condFalse__5: ; preds = %condContinue__4 + %28 = icmp eq i64 %r, %s + br label %condContinue__5 + +condContinue__5: ; preds = %condFalse__5, %condContinue__4 + %29 = phi i1 [ %27, %condContinue__4 ], [ %28, %condFalse__5 ] + br i1 %29, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__5 + %30 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @22, i32 0, i32 0)) + %31 = call %String* @__quantum__rt__int_to_string(i64 %p) + %32 = call %String* @__quantum__rt__string_concatenate(%String* %30, %String* %31) + call void @__quantum__rt__string_update_reference_count(%String* %30, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %31, i32 -1) + %33 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %34 = call %String* @__quantum__rt__string_concatenate(%String* %32, %String* %33) + call void @__quantum__rt__string_update_reference_count(%String* %32, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %33, i32 -1) + %35 = call %String* @__quantum__rt__int_to_string(i64 %q) + %36 = call %String* @__quantum__rt__string_concatenate(%String* %34, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %38 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %37) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %37, i32 -1) + %39 = call %String* @__quantum__rt__int_to_string(i64 %r) + %40 = call %String* @__quantum__rt__string_concatenate(%String* %38, %String* %39) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 
0)) + %42 = call %String* @__quantum__rt__string_concatenate(%String* %40, %String* %41) + call void @__quantum__rt__string_update_reference_count(%String* %40, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %41, i32 -1) + %43 = call %String* @__quantum__rt__int_to_string(i64 %s) + %44 = call %String* @__quantum__rt__string_concatenate(%String* %42, %String* %43) + call void @__quantum__rt__string_update_reference_count(%String* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %43, i32 -1) + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %46 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %46) + unreachable + +continue__1: ; preds = %condContinue__5 + %47 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 0) + %49 = bitcast i8* %48 to i2* + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 1) + %51 = bitcast i8* %50 to i2* + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 2) + %53 = bitcast i8* %52 to i2* + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 3) + %55 = bitcast i8* %54 to i2* + store i2 -1, i2* %49, align 1 + store i2 -1, i2* %51, align 1 + store i2 1, i2* %53, align 1 + store i2 -1, i2* %55, align 1 + %56 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 0) + %58 = bitcast i8* %57 to i2* + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 1) + %60 = bitcast i8* %59 to i2* + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 2) + %62 = bitcast i8* %61 to i2* + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 3) + %64 = bitcast i8* %63 to i2* + store i2 1, i2* %58, align 1 + store i2 1, i2* %60, align 1 + store i2 1, i2* %62, align 1 + store i2 -1, i2* %64, align 1 + %65 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 0) + %67 = bitcast i8* %66 to i2* + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 1) + %69 = bitcast i8* %68 to i2* + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 2) + %71 = bitcast i8* %70 to i2* + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 3) + %73 = bitcast i8* %72 to i2* + store i2 1, i2* %67, align 1 + store i2 -1, i2* %69, 
align 1 + store i2 -1, i2* %71, align 1 + store i2 -1, i2* %73, align 1 + %74 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 0) + %76 = bitcast i8* %75 to i2* + %77 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 1) + %78 = bitcast i8* %77 to i2* + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 2) + %80 = bitcast i8* %79 to i2* + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 3) + %82 = bitcast i8* %81 to i2* + store i2 -1, i2* %76, align 1 + store i2 1, i2* %78, align 1 + store i2 -1, i2* %80, align 1 + store i2 -1, i2* %82, align 1 + %83 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 0) + %85 = bitcast i8* %84 to i2* + %86 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 1) + %87 = bitcast i8* %86 to i2* + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 2) + %89 = bitcast i8* %88 to i2* + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 3) + %91 = bitcast i8* %90 to i2* + store i2 1, i2* %85, align 1 + store i2 -1, i2* %87, align 1 + store i2 1, i2* %89, align 1 + store i2 1, i2* %91, align 1 + %92 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 0) + %94 = bitcast i8* %93 to i2* + %95 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 1) + %96 = bitcast i8* %95 to i2* + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 2) + %98 = bitcast i8* %97 to i2* + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 3) + %100 = bitcast i8* %99 to i2* + store i2 -1, i2* %94, align 1 + store i2 1, i2* %96, align 1 + store i2 1, i2* %98, align 1 + store i2 1, i2* %100, align 1 + %101 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 0) + %103 = bitcast i8* %102 to i2* + %104 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 1) + %105 = bitcast i8* %104 to i2* + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 2) + %107 = bitcast i8* %106 to i2* + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 3) + %109 = bitcast i8* %108 to i2* + store i2 -1, i2* %103, align 1 + store i2 -1, i2* %105, align 1 + store i2 -1, i2* %107, align 1 + store i2 1, i2* %109, align 1 + %110 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %111 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 0) + %112 = bitcast i8* %111 to i2* + %113 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 1) + %114 = bitcast i8* %113 to i2* + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 2) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 3) + %118 = bitcast i8* %117 to i2* + store i2 1, i2* %112, align 1 + store i2 1, i2* %114, align 1 + store i2 -1, i2* %116, align 1 + store i2 1, i2* %118, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %120 = bitcast i8* %119 to %Array** + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %122 = bitcast i8* %121 to 
%Array** + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 2) + %124 = bitcast i8* %123 to %Array** + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 3) + %126 = bitcast i8* %125 to %Array** + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 4) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 5) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 6) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 7) + %134 = bitcast i8* %133 to %Array** + store %Array* %47, %Array** %120, align 8 + store %Array* %56, %Array** %122, align 8 + store %Array* %65, %Array** %124, align 8 + store %Array* %74, %Array** %126, align 8 + store %Array* %83, %Array** %128, align 8 + store %Array* %92, %Array** %130, align 8 + store %Array* %101, %Array** %132, align 8 + store %Array* %110, %Array** %134, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %135 = phi i64 [ 0, %continue__1 ], [ %140, %exiting__1 ] + %136 = icmp sle i64 %135, 7 + br i1 %136, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %135) + %138 = bitcast i8* %137 to %Array** + %139 = load %Array*, %Array** %138, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %139, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %140 = add i64 %135, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %141 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 0) + %143 = bitcast i8* %142 to i64* + %144 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 1) + %145 = bitcast i8* %144 to i64* + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 2) + %147 = bitcast i8* %146 to i64* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 3) + %149 = bitcast i8* %148 to i64* + store i64 %p, i64* %143, align 4 + store i64 %q, i64* %145, align 4 + store i64 %r, i64* %147, align 4 + store i64 %s, i64* %149, align 4 + %150 = call { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %141) + %151 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 0 + %sortedIndices = load %Array*, %Array** %151, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %sortedIndices, i32 1) + %152 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 1 + %signs = load %Array*, %Array** %152, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 1) + %153 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 2 + %globalSign = load double, double* %153, align 8 + %154 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %ops, %Array* %signs) + %155 = call i64 @__quantum__rt__array_get_size_1d(%Array* %154) + %156 = sub i64 %155, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %157 = phi i64 [ 0, %exit__1 ], [ 
%166, %exiting__2 ] + %158 = icmp sle i64 %157, %156 + br i1 %158, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %159 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %154, i64 %157) + %160 = bitcast i8* %159 to { %Array*, double }** + %161 = load { %Array*, double }*, { %Array*, double }** %160, align 8 + %162 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %161, i32 0, i32 0 + %op = load %Array*, %Array** %162, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %163 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %161, i32 0, i32 1 + %sign = load double, double* %163, align 8 + %164 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %pauliString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %164, %Array* %sortedIndices, %Array* %op) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + %165 = fmul double %globalSign, %sign + %theta = fmul double %165, %angle + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %pauliString, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %166 = add i64 %157, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %167 = phi i64 [ 0, %exit__2 ], [ %172, %exiting__3 ] + %168 = icmp sle i64 %167, 7 + br i1 %168, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %169 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %167) + %170 = bitcast i8* %169 to %Array** + %171 = load %Array*, %Array** %170, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %171, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %172 = add i64 %167, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %sortedIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %173 = phi i64 [ 0, %exit__3 ], [ %178, %exiting__4 ] + 
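+; Cleanup bookkeeping: header__3/body__3 above dropped the alias counts taken on the eight Pauli-operator arrays held in %ops, and header__4/body__4 below release their per-element reference counts before %ops itself, the index array %141, and the signs tuple are released.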
%174 = icmp sle i64 %173, 7 + br i1 %174, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %175 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %173) + %176 = bitcast i8* %175 to %Array** + %177 = load %Array*, %Array** %176, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %177, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %178 = add i64 %173, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %141, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %sortedIndices, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %signs, i32 -1) + %179 = bitcast { %Array*, %Array*, double }* %150 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %179, i32 -1) + %180 = sub i64 %155, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %181 = phi i64 [ 0, %exit__4 ], [ %189, %exiting__5 ] + %182 = icmp sle i64 %181, %180 + br i1 %182, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %183 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %154, i64 %181) + %184 = bitcast i8* %183 to { %Array*, double }** + %185 = load { %Array*, double }*, { %Array*, double }** %184, align 8 + %186 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %185, i32 0, i32 0 + %187 = load %Array*, %Array** %186, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %187, i32 -1) + %188 = bitcast { %Array*, double }* %185 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %188, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %189 = add i64 %181, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %154, i32 -1) + ret void +} + +define internal { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %indices) { +entry: + %sign = alloca double, align 8 + %signs = alloca %Array*, align 8 + %sorted = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %1 = bitcast i8* %0 to i64* + %p = load i64, i64* %1, align 4 + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 1) + %3 = bitcast i8* %2 to i64* + %q = load i64, i64* %3, align 4 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 2) + %5 = bitcast i8* %4 to i64* + %r = load i64, i64* %5, align 4 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 3) + %7 = bitcast i8* %6 to i64* + %s = load i64, i64* %7, align 4 + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 0) + %10 = bitcast i8* %9 to i64* + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 1) + %12 = bitcast i8* %11 to i64* + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 2) + %14 = bitcast i8* %13 to i64* + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 3) + %16 = bitcast i8* %15 to i64* + store i64 0, i64* %10, align 4 + store i64 0, i64* %12, align 4 + store i64 0, i64* %14, align 4 + store i64 
0, i64* %16, align 4 + store %Array* %8, %Array** %sorted, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %17 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 0) + %19 = bitcast i8* %18 to double* + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 1) + %21 = bitcast i8* %20 to double* + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 2) + %23 = bitcast i8* %22 to double* + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 3) + %25 = bitcast i8* %24 to double* + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 4) + %27 = bitcast i8* %26 to double* + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 5) + %29 = bitcast i8* %28 to double* + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 6) + %31 = bitcast i8* %30 to double* + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 7) + %33 = bitcast i8* %32 to double* + store double 0.000000e+00, double* %19, align 8 + store double 0.000000e+00, double* %21, align 8 + store double 0.000000e+00, double* %23, align 8 + store double 0.000000e+00, double* %25, align 8 + store double 0.000000e+00, double* %27, align 8 + store double 0.000000e+00, double* %29, align 8 + store double 0.000000e+00, double* %31, align 8 + store double 0.000000e+00, double* %33, align 8 + store %Array* %17, %Array** %signs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + store double 1.000000e+00, double* %sign, align 8 + %34 = icmp sgt i64 %p, %q + br i1 %34, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + store double -1.000000e+00, double* %sign, align 8 + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + %35 = icmp sgt i64 %r, %s + br i1 %35, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + %36 = load double, double* %sign, align 8 + %37 = fmul double %36, -1.000000e+00 + store double %37, double* %sign, align 8 + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 + %38 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 0) + %40 = bitcast i8* %39 to i64* + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 1) + %42 = bitcast i8* %41 to i64* + store i64 %p, i64* %40, align 4 + store i64 %q, i64* %42, align 4 + %43 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %38) + %44 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 0) + %46 = bitcast i8* %45 to i64* + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 1) + %48 = bitcast i8* %47 to i64* + store i64 %r, i64* %46, align 4 + store i64 %s, i64* %48, align 4 + %49 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %44) + %50 = icmp sgt i64 %43, %49 + call void @__quantum__rt__array_update_reference_count(%Array* %38, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 -1) + br i1 %50, label %then0__3, label %else__1 + +then0__3: ; preds = %continue__2 + %51 = load double, double* %sign, align 8 + %52 = fmul double %51, -1.000000e+00 + store double %52, double* %sign, align 8 + %53 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %54 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 0) + %55 = bitcast i8* %54 to i64* + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 1) + %57 = bitcast i8* %56 to i64* + store i64 %r, i64* %55, align 4 + store i64 %s, i64* %57, align 4 + %58 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %53) + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1) + %59 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %59, i64 0) + %61 = bitcast i8* %60 to i64* + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %59, i64 1) + %63 = bitcast i8* %62 to i64* + store i64 %r, i64* %61, align 4 + store i64 %s, i64* %63, align 4 + %64 = call i64 @Microsoft__Quantum__Math__Max__body(%Array* %59) + call void @__quantum__rt__array_update_reference_count(%Array* %59, i32 -1) + %65 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 0) + %67 = bitcast i8* %66 to i64* + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 1) + %69 = bitcast i8* %68 to i64* + store i64 %p, i64* %67, align 4 + store i64 %q, i64* %69, align 4 + %70 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %65) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + %71 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %71, i64 0) + %73 = bitcast i8* %72 to i64* + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %71, i64 1) + %75 = bitcast i8* %74 to i64* + store i64 %p, i64* %73, align 4 + store i64 %q, i64* %75, align 4 + %76 = call i64 @Microsoft__Quantum__Math__Max__body(%Array* %71) + call void @__quantum__rt__array_update_reference_count(%Array* %71, i32 -1) + %77 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 0) + %79 = bitcast i8* %78 to i64* + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 1) + %81 = bitcast i8* %80 to i64* + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 2) + %83 = bitcast i8* %82 to i64* + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 3) + %85 = bitcast i8* %84 to i64* + store i64 %58, i64* %79, align 4 + store i64 %64, i64* %81, align 4 + store i64 %70, i64* %83, align 4 + store i64 %76, i64* %85, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %77, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 -1) + store %Array* %77, %Array** %sorted, align 8 + br label %continue__3 + +else__1: ; preds = %continue__2 + %86 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %87 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %86, i64 0) + %88 = bitcast i8* %87 to i64* + %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %86, i64 1) + %90 = bitcast i8* %89 to i64* + store i64 %p, i64* %88, align 4 + store i64 %q, i64* %90, align 4 + %91 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %86) + call void @__quantum__rt__array_update_reference_count(%Array* %86, i32 -1) + %92 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 0) + %94 = bitcast 
i8* %93 to i64* + %95 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 1) + %96 = bitcast i8* %95 to i64* + store i64 %p, i64* %94, align 4 + store i64 %q, i64* %96, align 4 + %97 = call i64 @Microsoft__Quantum__Math__Max__body(%Array* %92) + call void @__quantum__rt__array_update_reference_count(%Array* %92, i32 -1) + %98 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %98, i64 0) + %100 = bitcast i8* %99 to i64* + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %98, i64 1) + %102 = bitcast i8* %101 to i64* + store i64 %r, i64* %100, align 4 + store i64 %s, i64* %102, align 4 + %103 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %98) + call void @__quantum__rt__array_update_reference_count(%Array* %98, i32 -1) + %104 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %105 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %104, i64 0) + %106 = bitcast i8* %105 to i64* + %107 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %104, i64 1) + %108 = bitcast i8* %107 to i64* + store i64 %r, i64* %106, align 4 + store i64 %s, i64* %108, align 4 + %109 = call i64 @Microsoft__Quantum__Math__Max__body(%Array* %104) + call void @__quantum__rt__array_update_reference_count(%Array* %104, i32 -1) + %110 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %111 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 0) + %112 = bitcast i8* %111 to i64* + %113 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 1) + %114 = bitcast i8* %113 to i64* + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 2) + %116 = bitcast i8* %115 to i64* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 3) + %118 = bitcast i8* %117 to i64* + store i64 %91, i64* %112, align 4 + store i64 %97, i64* %114, align 4 + store i64 %103, i64* %116, align 4 + store i64 %109, i64* %118, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %110, i32 1) + %119 = load %Array*, %Array** %sorted, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %119, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %119, i32 -1) + store %Array* %110, %Array** %sorted, align 8 + br label %continue__3 + +continue__3: ; preds = %else__1, %then0__3 + %120 = load %Array*, %Array** %sorted, align 8 + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %120, i64 0) + %122 = bitcast i8* %121 to i64* + %p1 = load i64, i64* %122, align 4 + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %120, i64 1) + %124 = bitcast i8* %123 to i64* + %q1 = load i64, i64* %124, align 4 + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %120, i64 2) + %126 = bitcast i8* %125 to i64* + %r1 = load i64, i64* %126, align 4 + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %120, i64 3) + %128 = bitcast i8* %127 to i64* + %s1 = load i64, i64* %128, align 4 + %129 = icmp slt i64 %q1, %r1 + br i1 %129, label %then0__4, label %test1__1 + +then0__4: ; preds = %continue__3 + %130 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %130, i64 0) + %132 = bitcast i8* %131 to i64* + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %130, i64 1) + %134 = bitcast i8* %133 to i64* + %135 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%130, i64 2) + %136 = bitcast i8* %135 to i64* + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %130, i64 3) + %138 = bitcast i8* %137 to i64* + store i64 %p1, i64* %132, align 4 + store i64 %q1, i64* %134, align 4 + store i64 %r1, i64* %136, align 4 + store i64 %s1, i64* %138, align 4 + %139 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %140 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 0) + %141 = bitcast i8* %140 to double* + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 1) + %143 = bitcast i8* %142 to double* + %144 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 2) + %145 = bitcast i8* %144 to double* + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 3) + %147 = bitcast i8* %146 to double* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 4) + %149 = bitcast i8* %148 to double* + %150 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 5) + %151 = bitcast i8* %150 to double* + %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 6) + %153 = bitcast i8* %152 to double* + %154 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 7) + %155 = bitcast i8* %154 to double* + store double 1.000000e+00, double* %141, align 8 + store double -1.000000e+00, double* %143, align 8 + store double -1.000000e+00, double* %145, align 8 + store double -1.000000e+00, double* %147, align 8 + store double 1.000000e+00, double* %149, align 8 + store double 1.000000e+00, double* %151, align 8 + store double 1.000000e+00, double* %153, align 8 + store double -1.000000e+00, double* %155, align 8 + %156 = load double, double* %sign, align 8 + %157 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, double }* getelementptr ({ %Array*, %Array*, double }, { %Array*, %Array*, double }* null, i32 1) to i64)) + %158 = bitcast %Tuple* %157 to { %Array*, %Array*, double }* + %159 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %158, i32 0, i32 0 + %160 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %158, i32 0, i32 1 + %161 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %158, i32 0, i32 2 + store %Array* %130, %Array** %159, align 8 + store %Array* %139, %Array** %160, align 8 + store double %156, double* %161, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + ret { %Array*, %Array*, double }* %158 + +test1__1: ; preds = %continue__3 + %162 = icmp sgt i64 %q1, %r1 + br i1 %162, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %test1__1 + %163 = icmp slt i64 %q1, %s1 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %test1__1 + %164 = phi i1 [ %163, %condTrue__1 ], [ %162, %test1__1 ] + br i1 %164, label %then1__1, label %test2__1 + +then1__1: ; preds = %condContinue__1 + %165 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %166 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 0) + %167 = bitcast i8* %166 to i64* + %168 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 1) + %169 = bitcast i8* %168 to i64* + %170 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 2) + %171 = bitcast i8* %170 to i64* + %172 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 3) + %173 = bitcast i8* %172 to i64* + store i64 %p1, i64* %167, align 4 + store i64 %r1, i64* %169, align 4 + store i64 %q1, i64* %171, align 4 + store i64 %s1, i64* %173, align 4 + %174 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %175 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 0) + %176 = bitcast i8* %175 to double* + %177 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 1) + %178 = bitcast i8* %177 to double* + %179 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 2) + %180 = bitcast i8* %179 to double* + %181 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 3) + %182 = bitcast i8* %181 to double* + %183 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 4) + %184 = bitcast i8* %183 to double* + %185 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 5) + %186 = bitcast i8* %185 to double* + %187 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 6) + %188 = bitcast i8* %187 to double* + %189 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 7) + %190 = bitcast i8* %189 to double* + store double -1.000000e+00, double* %176, align 8 + store double -1.000000e+00, double* %178, align 8 + store double -1.000000e+00, double* %180, align 8 + store double 1.000000e+00, double* %182, align 8 + store double -1.000000e+00, double* %184, align 8 + store double 1.000000e+00, double* %186, align 8 + store double 1.000000e+00, double* %188, align 8 + store double 1.000000e+00, double* %190, align 8 + %191 = load double, double* %sign, align 8 + %192 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, double }* getelementptr ({ %Array*, %Array*, double }, { %Array*, %Array*, double }* null, i32 1) to i64)) + %193 = bitcast %Tuple* %192 to { %Array*, %Array*, double }* + %194 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %193, i32 0, i32 0 + %195 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %193, i32 0, i32 1 + %196 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %193, i32 0, i32 2 + store %Array* %165, %Array** %194, align 8 + store %Array* %174, %Array** %195, align 8 + store double %191, double* %196, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + ret { %Array*, %Array*, double }* %193 + +test2__1: ; preds = %condContinue__1 + %197 = icmp sgt i64 %q1, %r1 + br i1 %197, label %condTrue__2, label %condContinue__2 + +condTrue__2: ; preds = %test2__1 + %198 = icmp sgt i64 %q1, %s1 + br label %condContinue__2 + +condContinue__2: ; preds = %condTrue__2, %test2__1 + %199 = phi i1 [ %198, %condTrue__2 ], [ %197, %test2__1 ] + br i1 %199, label %then2__1, label %else__2 + +then2__1: ; preds = %condContinue__2 + %200 = call %Array* 
@__quantum__rt__array_create_1d(i32 8, i64 4) + %201 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 0) + %202 = bitcast i8* %201 to i64* + %203 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 1) + %204 = bitcast i8* %203 to i64* + %205 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 2) + %206 = bitcast i8* %205 to i64* + %207 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 3) + %208 = bitcast i8* %207 to i64* + store i64 %p1, i64* %202, align 4 + store i64 %r1, i64* %204, align 4 + store i64 %s1, i64* %206, align 4 + store i64 %q1, i64* %208, align 4 + %209 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %210 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 0) + %211 = bitcast i8* %210 to double* + %212 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 1) + %213 = bitcast i8* %212 to double* + %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 2) + %215 = bitcast i8* %214 to double* + %216 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 3) + %217 = bitcast i8* %216 to double* + %218 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 4) + %219 = bitcast i8* %218 to double* + %220 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 5) + %221 = bitcast i8* %220 to double* + %222 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 6) + %223 = bitcast i8* %222 to double* + %224 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 7) + %225 = bitcast i8* %224 to double* + store double 1.000000e+00, double* %211, align 8 + store double 1.000000e+00, double* %213, align 8 + store double -1.000000e+00, double* %215, align 8 + store double 1.000000e+00, double* %217, align 8 + store double -1.000000e+00, double* %219, align 8 + store double 1.000000e+00, double* %221, align 8 + store double -1.000000e+00, double* %223, align 8 + store double -1.000000e+00, double* %225, align 8 + %226 = load double, double* %sign, align 8 + %227 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, double }* getelementptr ({ %Array*, %Array*, double }, { %Array*, %Array*, double }* null, i32 1) to i64)) + %228 = bitcast %Tuple* %227 to { %Array*, %Array*, double }* + %229 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %228, i32 0, i32 0 + %230 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %228, i32 0, i32 1 + %231 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %228, i32 0, i32 2 + store %Array* %200, %Array** %229, align 8 + store %Array* %209, %Array** %230, align 8 + store double %226, double* %231, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + ret { %Array*, %Array*, double }* %228 + +else__2: ; preds = %condContinue__2 + %232 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @29, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__fail(%String* %232) + unreachable + +continue__4: ; No predecessors! + unreachable +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %nFermions, %Array* %idxFermions, %Array* %pauliReplacements) { +entry: + %pauliString = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliReplacements, i32 1) + %0 = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliZString__body(i64 %nFermions, %Array* %idxFermions) + store %Array* %0, %Array** %pauliString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 1) + %1 = call %Range @Microsoft__Quantum__Arrays___d58849b717694e4ca69317572366b289_IndexRange__body(%Array* %idxFermions) + %2 = extractvalue %Range %1, 0 + %3 = extractvalue %Range %1, 1 + %4 = extractvalue %Range %1, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %5 = icmp sgt i64 %3, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idx = phi i64 [ %2, %preheader__1 ], [ %17, %exiting__1 ] + %6 = icmp sle i64 %idx, %4 + %7 = icmp sge i64 %idx, %4 + %8 = select i1 %5, i1 %6, i1 %7 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 %idx) + %10 = bitcast i8* %9 to i64* + %idxFermion = load i64, i64* %10, align 4 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %pauliReplacements, i64 %idx) + %12 = bitcast i8* %11 to i2* + %op = load i2, i2* %12, align 1 + %13 = load %Array*, %Array** %pauliString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 -1) + %14 = call %Array* @__quantum__rt__array_copy(%Array* %13, i1 false) + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %14, i64 %idxFermion) + %16 = bitcast i8* %15 to i2* + store i2 %op, i2* %16, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + store %Array* %14, %Array** %pauliString, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %idx, %3 + br label %header__1 + +exit__1: ; preds = %header__1 + %18 = load %Array*, %Array** %pauliString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliReplacements, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 -1) + ret %Array* %18 +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQRSTerm____adj({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %__qsVar0__idxTermType__ = load 
%Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %8 = bitcast i8* %7 to i64* + %__qsVar3__p__ = load i64, i64* %8, align 4 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %10 = bitcast i8* %9 to i64* + %__qsVar4__q__ = load i64, i64* %10, align 4 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 2) + %12 = bitcast i8* %11 to i64* + %__qsVar5__r__ = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %14 = bitcast i8* %13 to i64* + %__qsVar6__s__ = load i64, i64* %14, align 4 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %16 = bitcast i8* %15 to double* + %17 = load double, double* %16, align 8 + %18 = fmul double 1.250000e-01, %17 + %__qsVar7__angle__ = fmul double %18, %stepSize + %19 = icmp eq i64 %__qsVar3__p__, %__qsVar4__q__ + br i1 %19, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %entry + %20 = icmp eq i64 %__qsVar3__p__, %__qsVar5__r__ + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %entry + %21 = phi i1 [ %19, %entry ], [ %20, %condFalse__1 ] + br i1 %21, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %condContinue__1 + %22 = icmp eq i64 %__qsVar3__p__, %__qsVar6__s__ + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condContinue__1 + %23 = phi i1 [ %21, %condContinue__1 ], [ %22, %condFalse__2 ] + br i1 %23, label %condContinue__3, label %condFalse__3 + +condFalse__3: ; preds = %condContinue__2 + %24 = icmp eq i64 %__qsVar4__q__, %__qsVar5__r__ + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condContinue__2 + %25 = phi i1 [ %23, %condContinue__2 ], [ %24, %condFalse__3 ] + br i1 %25, label %condContinue__4, label %condFalse__4 + +condFalse__4: ; preds = %condContinue__3 + %26 = icmp eq i64 %__qsVar4__q__, %__qsVar6__s__ + br label %condContinue__4 + +condContinue__4: ; preds = %condFalse__4, %condContinue__3 + %27 = phi i1 [ %25, %condContinue__3 ], [ %26, %condFalse__4 ] + br i1 %27, label %condContinue__5, label %condFalse__5 + +condFalse__5: ; preds = 
%condContinue__4 + %28 = icmp eq i64 %__qsVar5__r__, %__qsVar6__s__ + br label %condContinue__5 + +condContinue__5: ; preds = %condFalse__5, %condContinue__4 + %29 = phi i1 [ %27, %condContinue__4 ], [ %28, %condFalse__5 ] + br i1 %29, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__5 + %30 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @22, i32 0, i32 0)) + %31 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar3__p__) + %32 = call %String* @__quantum__rt__string_concatenate(%String* %30, %String* %31) + call void @__quantum__rt__string_update_reference_count(%String* %30, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %31, i32 -1) + %33 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %34 = call %String* @__quantum__rt__string_concatenate(%String* %32, %String* %33) + call void @__quantum__rt__string_update_reference_count(%String* %32, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %33, i32 -1) + %35 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar4__q__) + %36 = call %String* @__quantum__rt__string_concatenate(%String* %34, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %38 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %37) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %37, i32 -1) + %39 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar5__r__) + %40 = call %String* @__quantum__rt__string_concatenate(%String* %38, %String* %39) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %42 = call %String* @__quantum__rt__string_concatenate(%String* %40, %String* %41) + call void @__quantum__rt__string_update_reference_count(%String* %40, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %41, i32 -1) + %43 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar6__s__) + %44 = call %String* @__quantum__rt__string_concatenate(%String* %42, %String* %43) + call void @__quantum__rt__string_update_reference_count(%String* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %43, i32 -1) + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %46 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__fail(%String* %46) + unreachable + +continue__1: ; preds = %condContinue__5 + %47 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 0) + %49 = bitcast i8* %48 to i2* + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 1) + %51 = bitcast i8* %50 to i2* + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 2) + %53 = bitcast i8* %52 to i2* + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 3) + %55 = bitcast i8* %54 to i2* + store i2 -1, i2* %49, align 1 + store i2 -1, i2* %51, align 1 + store i2 1, i2* %53, align 1 + store i2 -1, i2* %55, align 1 + %56 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 0) + %58 = bitcast i8* %57 to i2* + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 1) + %60 = bitcast i8* %59 to i2* + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 2) + %62 = bitcast i8* %61 to i2* + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 3) + %64 = bitcast i8* %63 to i2* + store i2 1, i2* %58, align 1 + store i2 1, i2* %60, align 1 + store i2 1, i2* %62, align 1 + store i2 -1, i2* %64, align 1 + %65 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 0) + %67 = bitcast i8* %66 to i2* + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 1) + %69 = bitcast i8* %68 to i2* + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 2) + %71 = bitcast i8* %70 to i2* + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 3) + %73 = bitcast i8* %72 to i2* + store i2 1, i2* %67, align 1 + store i2 -1, i2* %69, align 1 + store i2 -1, i2* %71, align 1 + store i2 -1, i2* %73, align 1 + %74 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 0) + %76 = bitcast i8* %75 to i2* + %77 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 1) + %78 = bitcast i8* %77 to i2* + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 2) + %80 = bitcast i8* %79 to i2* + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 3) + %82 = bitcast i8* %81 to i2* + store i2 -1, i2* %76, align 1 + store i2 1, i2* %78, align 1 + store i2 -1, i2* %80, align 1 + store i2 -1, i2* %82, align 1 + %83 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 0) + %85 = bitcast i8* %84 to i2* + %86 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 1) + %87 = bitcast i8* %86 to i2* + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 2) + %89 = bitcast i8* %88 to i2* + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 3) + %91 = bitcast i8* %90 to i2* + store i2 
1, i2* %85, align 1 + store i2 -1, i2* %87, align 1 + store i2 1, i2* %89, align 1 + store i2 1, i2* %91, align 1 + %92 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 0) + %94 = bitcast i8* %93 to i2* + %95 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 1) + %96 = bitcast i8* %95 to i2* + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 2) + %98 = bitcast i8* %97 to i2* + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 3) + %100 = bitcast i8* %99 to i2* + store i2 -1, i2* %94, align 1 + store i2 1, i2* %96, align 1 + store i2 1, i2* %98, align 1 + store i2 1, i2* %100, align 1 + %101 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 0) + %103 = bitcast i8* %102 to i2* + %104 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 1) + %105 = bitcast i8* %104 to i2* + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 2) + %107 = bitcast i8* %106 to i2* + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 3) + %109 = bitcast i8* %108 to i2* + store i2 -1, i2* %103, align 1 + store i2 -1, i2* %105, align 1 + store i2 -1, i2* %107, align 1 + store i2 1, i2* %109, align 1 + %110 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %111 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 0) + %112 = bitcast i8* %111 to i2* + %113 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 1) + %114 = bitcast i8* %113 to i2* + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 2) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 3) + %118 = bitcast i8* %117 to i2* + store i2 1, i2* %112, align 1 + store i2 1, i2* %114, align 1 + store i2 -1, i2* %116, align 1 + store i2 1, i2* %118, align 1 + %__qsVar10__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 0) + %120 = bitcast i8* %119 to %Array** + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 1) + %122 = bitcast i8* %121 to %Array** + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 2) + %124 = bitcast i8* %123 to %Array** + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 3) + %126 = bitcast i8* %125 to %Array** + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 4) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 5) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 6) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 7) + %134 = bitcast i8* %133 to %Array** + store %Array* %47, %Array** %120, align 8 + store %Array* %56, %Array** %122, align 8 + store %Array* %65, %Array** %124, align 8 + store %Array* %74, %Array** %126, align 8 + store %Array* %83, %Array** %128, align 8 + store %Array* %92, %Array** %130, align 8 + store %Array* %101, %Array** %132, align 8 + store %Array* %110, %Array** %134, align 8 + br label %header__1 + 
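+; As in the __body specialization above, header__1/body__1 take an alias count on each of the eight Pauli-operator arrays collected in %__qsVar10__ops__; the adjoint then walks the zipped (op, sign) pairs in reverse order, applying __quantum__qis__exp__adj instead of __quantum__qis__exp__body.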
+header__1: ; preds = %exiting__1, %continue__1 + %135 = phi i64 [ 0, %continue__1 ], [ %140, %exiting__1 ] + %136 = icmp sle i64 %135, 7 + br i1 %136, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %135) + %138 = bitcast i8* %137 to %Array** + %139 = load %Array*, %Array** %138, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %139, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %140 = add i64 %135, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__ops__, i32 1) + %141 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 0) + %143 = bitcast i8* %142 to i64* + %144 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 1) + %145 = bitcast i8* %144 to i64* + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 2) + %147 = bitcast i8* %146 to i64* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 3) + %149 = bitcast i8* %148 to i64* + store i64 %__qsVar3__p__, i64* %143, align 4 + store i64 %__qsVar4__q__, i64* %145, align 4 + store i64 %__qsVar5__r__, i64* %147, align 4 + store i64 %__qsVar6__s__, i64* %149, align 4 + %150 = call { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %141) + %151 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 0 + %__qsVar11__sortedIndices__ = load %Array*, %Array** %151, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar11__sortedIndices__, i32 1) + %152 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 1 + %__qsVar12__signs__ = load %Array*, %Array** %152, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar12__signs__, i32 1) + %153 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 2 + %__qsVar13__globalSign__ = load double, double* %153, align 8 + %154 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %__qsVar10__ops__, %Array* %__qsVar12__signs__) + %155 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %__qsVar10__ops__, %Array* %__qsVar12__signs__) + %156 = call i64 @__quantum__rt__array_get_size_1d(%Array* %155) + %157 = sub i64 %156, 1 + %158 = insertvalue %Range zeroinitializer, i64 %157, 0 + %159 = insertvalue %Range %158, i64 -1, 1 + %160 = insertvalue %Range %159, i64 0, 2 + %161 = call %Array* @__quantum__rt__array_slice_1d(%Array* %154, %Range %160, i1 true) + %162 = call i64 @__quantum__rt__array_get_size_1d(%Array* %161) + %163 = sub i64 %162, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %164 = phi i64 [ 0, %exit__1 ], [ %173, %exiting__2 ] + %165 = icmp sle i64 %164, %163 + br i1 %165, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %166 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %161, i64 %164) + %167 = bitcast i8* %166 to { %Array*, double }** + %168 = load { %Array*, double }*, { %Array*, double }** %167, align 8 + %169 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %168, i32 0, i32 0 + %__qsVar14__op__ = load 
%Array*, %Array** %169, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar14__op__, i32 1) + %170 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %168, i32 0, i32 1 + %__qsVar15__sign__ = load double, double* %170, align 8 + %171 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %__qsVar16__pauliString__ = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %171, %Array* %__qsVar11__sortedIndices__, %Array* %__qsVar14__op__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 1) + %172 = fmul double %__qsVar13__globalSign__, %__qsVar15__sign__ + %theta = fmul double %172, %__qsVar7__angle__ + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %__qsVar16__pauliString__, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar14__op__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar16__pauliString__, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %173 = add i64 %164, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %174 = phi i64 [ 0, %exit__2 ], [ %179, %exiting__3 ] + %175 = icmp sle i64 %174, 7 + br i1 %175, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %176 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %174) + %177 = bitcast i8* %176 to %Array** + %178 = load %Array*, %Array** %177, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %178, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %179 = add i64 %174, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__ops__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar11__sortedIndices__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar12__signs__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %180 = phi i64 [ 0, %exit__3 ], [ %185, %exiting__4 ] + %181 = icmp sle i64 %180, 7 + br i1 %181, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %182 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %180) + %183 = bitcast i8* %182 to %Array** + %184 = load %Array*, %Array** %183, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %184, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %185 = add i64 %180, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %141, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar11__sortedIndices__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar12__signs__, i32 -1) + %186 = bitcast { %Array*, %Array*, double }* %150 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %186, i32 -1) + %187 = call i64 @__quantum__rt__array_get_size_1d(%Array* %154) + %188 = sub i64 %187, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %189 = phi i64 [ 0, %exit__4 ], [ %197, %exiting__5 ] + %190 = icmp sle i64 %189, %188 + br i1 %190, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %191 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %154, i64 %189) + %192 = bitcast i8* %191 to { %Array*, double }** + %193 = load { %Array*, double }*, { %Array*, double }** %192, align 8 + %194 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %193, i32 0, i32 0 + %195 = load %Array*, %Array** %194, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %195, i32 -1) + %196 = bitcast { %Array*, double }* %193 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %196, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %197 = add i64 %189, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %154, i32 -1) + %198 = sub i64 %156, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %199 = phi i64 [ 0, %exit__5 ], [ %207, %exiting__6 ] + %200 = icmp sle i64 %199, %198 + br i1 %200, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %201 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %155, i64 %199) + %202 = bitcast i8* %201 to { %Array*, double }** + %203 = load { %Array*, double }*, { %Array*, double }** %202, align 8 + %204 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %203, i32 0, i32 0 + %205 = load %Array*, %Array** %204, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %205, i32 -1) + %206 = bitcast { %Array*, double }* %203 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %206, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %207 = add i64 %199, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %155, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %161, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQRSTerm____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* 
}*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %coeff = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %12 = bitcast i8* %11 to i64* + %p = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %14 = bitcast i8* %13 to i64* + %q = load i64, i64* %14, align 4 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 2) + %16 = bitcast i8* %15 to i64* + %r = load i64, i64* %16, align 4 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %18 = bitcast i8* %17 to i64* + %s = load i64, i64* %18, align 4 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %20 = bitcast i8* %19 to double* + %21 = load double, double* %20, align 8 + %22 = fmul double 1.250000e-01, %21 + %angle = fmul double %22, %stepSize + %23 = icmp eq i64 %p, %q + br i1 %23, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %entry + %24 = icmp eq i64 %p, %r + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %entry + %25 = phi i1 [ %23, %entry ], [ %24, %condFalse__1 ] + br i1 %25, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %condContinue__1 + %26 = icmp eq i64 %p, %s + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condContinue__1 + %27 = phi i1 [ %25, %condContinue__1 ], [ %26, %condFalse__2 ] + br i1 %27, label %condContinue__3, label %condFalse__3 + +condFalse__3: ; preds = %condContinue__2 + %28 = 
icmp eq i64 %q, %r + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condContinue__2 + %29 = phi i1 [ %27, %condContinue__2 ], [ %28, %condFalse__3 ] + br i1 %29, label %condContinue__4, label %condFalse__4 + +condFalse__4: ; preds = %condContinue__3 + %30 = icmp eq i64 %q, %s + br label %condContinue__4 + +condContinue__4: ; preds = %condFalse__4, %condContinue__3 + %31 = phi i1 [ %29, %condContinue__3 ], [ %30, %condFalse__4 ] + br i1 %31, label %condContinue__5, label %condFalse__5 + +condFalse__5: ; preds = %condContinue__4 + %32 = icmp eq i64 %r, %s + br label %condContinue__5 + +condContinue__5: ; preds = %condFalse__5, %condContinue__4 + %33 = phi i1 [ %31, %condContinue__4 ], [ %32, %condFalse__5 ] + br i1 %33, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__5 + %34 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @22, i32 0, i32 0)) + %35 = call %String* @__quantum__rt__int_to_string(i64 %p) + %36 = call %String* @__quantum__rt__string_concatenate(%String* %34, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %38 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %37) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %37, i32 -1) + %39 = call %String* @__quantum__rt__int_to_string(i64 %q) + %40 = call %String* @__quantum__rt__string_concatenate(%String* %38, %String* %39) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %42 = call %String* @__quantum__rt__string_concatenate(%String* %40, %String* %41) + call void @__quantum__rt__string_update_reference_count(%String* %40, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %41, i32 -1) + %43 = call %String* @__quantum__rt__int_to_string(i64 %r) + %44 = call %String* @__quantum__rt__string_concatenate(%String* %42, %String* %43) + call void @__quantum__rt__string_update_reference_count(%String* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %43, i32 -1) + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %46 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + %47 = call %String* @__quantum__rt__int_to_string(i64 %s) + %48 = call %String* @__quantum__rt__string_concatenate(%String* %46, %String* %47) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + %49 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %50 = call %String* @__quantum__rt__string_concatenate(%String* %48, %String* %49) + call void @__quantum__rt__string_update_reference_count(%String* %48, i32 -1) + 
call void @__quantum__rt__string_update_reference_count(%String* %49, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %50) + unreachable + +continue__1: ; preds = %condContinue__5 + %51 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 0) + %53 = bitcast i8* %52 to i2* + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 1) + %55 = bitcast i8* %54 to i2* + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 2) + %57 = bitcast i8* %56 to i2* + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 3) + %59 = bitcast i8* %58 to i2* + store i2 -1, i2* %53, align 1 + store i2 -1, i2* %55, align 1 + store i2 1, i2* %57, align 1 + store i2 -1, i2* %59, align 1 + %60 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 0) + %62 = bitcast i8* %61 to i2* + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 1) + %64 = bitcast i8* %63 to i2* + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 2) + %66 = bitcast i8* %65 to i2* + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 3) + %68 = bitcast i8* %67 to i2* + store i2 1, i2* %62, align 1 + store i2 1, i2* %64, align 1 + store i2 1, i2* %66, align 1 + store i2 -1, i2* %68, align 1 + %69 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 0) + %71 = bitcast i8* %70 to i2* + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 1) + %73 = bitcast i8* %72 to i2* + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 2) + %75 = bitcast i8* %74 to i2* + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 3) + %77 = bitcast i8* %76 to i2* + store i2 1, i2* %71, align 1 + store i2 -1, i2* %73, align 1 + store i2 -1, i2* %75, align 1 + store i2 -1, i2* %77, align 1 + %78 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 0) + %80 = bitcast i8* %79 to i2* + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 1) + %82 = bitcast i8* %81 to i2* + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 2) + %84 = bitcast i8* %83 to i2* + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 3) + %86 = bitcast i8* %85 to i2* + store i2 -1, i2* %80, align 1 + store i2 1, i2* %82, align 1 + store i2 -1, i2* %84, align 1 + store i2 -1, i2* %86, align 1 + %87 = call %Array* 
@__quantum__rt__array_create_1d(i32 1, i64 4) + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 0) + %89 = bitcast i8* %88 to i2* + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 1) + %91 = bitcast i8* %90 to i2* + %92 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 2) + %93 = bitcast i8* %92 to i2* + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 3) + %95 = bitcast i8* %94 to i2* + store i2 1, i2* %89, align 1 + store i2 -1, i2* %91, align 1 + store i2 1, i2* %93, align 1 + store i2 1, i2* %95, align 1 + %96 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 0) + %98 = bitcast i8* %97 to i2* + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 1) + %100 = bitcast i8* %99 to i2* + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 2) + %102 = bitcast i8* %101 to i2* + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 3) + %104 = bitcast i8* %103 to i2* + store i2 -1, i2* %98, align 1 + store i2 1, i2* %100, align 1 + store i2 1, i2* %102, align 1 + store i2 1, i2* %104, align 1 + %105 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 0) + %107 = bitcast i8* %106 to i2* + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 1) + %109 = bitcast i8* %108 to i2* + %110 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 2) + %111 = bitcast i8* %110 to i2* + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 3) + %113 = bitcast i8* %112 to i2* + store i2 -1, i2* %107, align 1 + store i2 -1, i2* %109, align 1 + store i2 -1, i2* %111, align 1 + store i2 1, i2* %113, align 1 + %114 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 0) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 1) + %118 = bitcast i8* %117 to i2* + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 2) + %120 = bitcast i8* %119 to i2* + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 3) + %122 = bitcast i8* %121 to i2* + store i2 1, i2* %116, align 1 + store i2 1, i2* %118, align 1 + store i2 -1, i2* %120, align 1 + store i2 1, i2* %122, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %124 = bitcast i8* %123 to %Array** + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %126 = bitcast i8* %125 to %Array** + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 2) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 3) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 4) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 5) + %134 = bitcast i8* %133 to %Array** + %135 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 6) + %136 = bitcast i8* %135 to %Array** + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 7) + %138 = bitcast i8* %137 to 
%Array** + store %Array* %51, %Array** %124, align 8 + store %Array* %60, %Array** %126, align 8 + store %Array* %69, %Array** %128, align 8 + store %Array* %78, %Array** %130, align 8 + store %Array* %87, %Array** %132, align 8 + store %Array* %96, %Array** %134, align 8 + store %Array* %105, %Array** %136, align 8 + store %Array* %114, %Array** %138, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %139 = phi i64 [ 0, %continue__1 ], [ %144, %exiting__1 ] + %140 = icmp sle i64 %139, 7 + br i1 %140, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %141 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %139) + %142 = bitcast i8* %141 to %Array** + %143 = load %Array*, %Array** %142, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %143, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %144 = add i64 %139, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %145 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 0) + %147 = bitcast i8* %146 to i64* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 1) + %149 = bitcast i8* %148 to i64* + %150 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 2) + %151 = bitcast i8* %150 to i64* + %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 3) + %153 = bitcast i8* %152 to i64* + store i64 %p, i64* %147, align 4 + store i64 %q, i64* %149, align 4 + store i64 %r, i64* %151, align 4 + store i64 %s, i64* %153, align 4 + %154 = call { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %145) + %155 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 0 + %sortedIndices = load %Array*, %Array** %155, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %sortedIndices, i32 1) + %156 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 1 + %signs = load %Array*, %Array** %156, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 1) + %157 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 2 + %globalSign = load double, double* %157, align 8 + %158 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %ops, %Array* %signs) + %159 = call i64 @__quantum__rt__array_get_size_1d(%Array* %158) + %160 = sub i64 %159, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %161 = phi i64 [ 0, %exit__1 ], [ %175, %exiting__2 ] + %162 = icmp sle i64 %161, %160 + br i1 %162, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %163 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %158, i64 %161) + %164 = bitcast i8* %163 to { %Array*, double }** + %165 = load { %Array*, double }*, { %Array*, double }** %164, align 8 + %166 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %165, i32 0, i32 0 + %op = load %Array*, %Array** %166, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %167 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %165, i32 0, i32 1 + %sign = load double, double* %167, align 8 + %168 = call i64 
@__quantum__rt__array_get_size_1d(%Array* %qubits) + %pauliString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %168, %Array* %sortedIndices, %Array* %op) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + %169 = fmul double %globalSign, %sign + %theta = fmul double %169, %angle + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %170 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %171 = bitcast %Tuple* %170 to { %Array*, double, %Array* }* + %172 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %171, i32 0, i32 0 + %173 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %171, i32 0, i32 1 + %174 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %171, i32 0, i32 2 + store %Array* %pauliString, %Array** %172, align 8 + store double %theta, double* %173, align 8 + store %Array* %qubits, %Array** %174, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %171) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %170, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %175 = add i64 %161, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %176 = phi i64 [ 0, %exit__2 ], [ %181, %exiting__3 ] + %177 = icmp sle i64 %176, 7 + br i1 %177, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %178 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %176) + %179 = bitcast i8* %178 to %Array** + %180 = load %Array*, %Array** %179, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %180, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %181 = add i64 %176, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %sortedIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %182 = phi i64 [ 0, %exit__3 ], [ %187, %exiting__4 ] + %183 = icmp sle i64 %182, 7 + br i1 %183, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %184 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %182) + %185 = bitcast i8* %184 to %Array** + %186 = load %Array*, %Array** %185, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %186, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %187 = add i64 %182, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %145, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %sortedIndices, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %signs, i32 -1) + %188 = bitcast { %Array*, %Array*, double }* %154 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %188, i32 -1) + %189 = sub i64 %159, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %190 = phi i64 [ 0, %exit__4 ], [ %198, %exiting__5 ] + %191 = icmp sle i64 %190, %189 + br i1 %191, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %192 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %158, i64 %190) + %193 = bitcast i8* %192 to { %Array*, double }** + %194 = load { %Array*, double }*, { %Array*, double }** %193, align 8 + %195 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %194, i32 0, i32 0 + %196 = load %Array*, %Array** %195, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %196, i32 -1) + %197 = bitcast { %Array*, double }* %194 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %197, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %198 = add i64 %190, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %158, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQRSTerm____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* 
}, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %12 = bitcast i8* %11 to i64* + %__qsVar3__p__ = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %14 = bitcast i8* %13 to i64* + %__qsVar4__q__ = load i64, i64* %14, align 4 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 2) + %16 = bitcast i8* %15 to i64* + %__qsVar5__r__ = load i64, i64* %16, align 4 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %18 = bitcast i8* %17 to i64* + %__qsVar6__s__ = load i64, i64* %18, align 4 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %20 = bitcast i8* %19 to double* + %21 = load double, double* %20, align 8 + %22 = fmul double 1.250000e-01, %21 + %__qsVar7__angle__ = fmul double %22, %stepSize + %23 = icmp eq i64 %__qsVar3__p__, %__qsVar4__q__ + br i1 %23, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %entry + %24 = icmp eq i64 %__qsVar3__p__, %__qsVar5__r__ + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %entry + %25 = phi i1 [ %23, %entry ], [ %24, %condFalse__1 ] + br i1 %25, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %condContinue__1 + %26 = icmp eq i64 %__qsVar3__p__, %__qsVar6__s__ + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condContinue__1 + %27 = phi i1 [ %25, %condContinue__1 ], [ %26, %condFalse__2 ] + br i1 %27, label %condContinue__3, label %condFalse__3 + +condFalse__3: ; preds = %condContinue__2 + %28 = icmp eq i64 %__qsVar4__q__, %__qsVar5__r__ + br label %condContinue__3 + 
+condContinue__3: ; preds = %condFalse__3, %condContinue__2 + %29 = phi i1 [ %27, %condContinue__2 ], [ %28, %condFalse__3 ] + br i1 %29, label %condContinue__4, label %condFalse__4 + +condFalse__4: ; preds = %condContinue__3 + %30 = icmp eq i64 %__qsVar4__q__, %__qsVar6__s__ + br label %condContinue__4 + +condContinue__4: ; preds = %condFalse__4, %condContinue__3 + %31 = phi i1 [ %29, %condContinue__3 ], [ %30, %condFalse__4 ] + br i1 %31, label %condContinue__5, label %condFalse__5 + +condFalse__5: ; preds = %condContinue__4 + %32 = icmp eq i64 %__qsVar5__r__, %__qsVar6__s__ + br label %condContinue__5 + +condContinue__5: ; preds = %condFalse__5, %condContinue__4 + %33 = phi i1 [ %31, %condContinue__4 ], [ %32, %condFalse__5 ] + br i1 %33, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__5 + %34 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @22, i32 0, i32 0)) + %35 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar3__p__) + %36 = call %String* @__quantum__rt__string_concatenate(%String* %34, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %38 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %37) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %37, i32 -1) + %39 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar4__q__) + %40 = call %String* @__quantum__rt__string_concatenate(%String* %38, %String* %39) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %42 = call %String* @__quantum__rt__string_concatenate(%String* %40, %String* %41) + call void @__quantum__rt__string_update_reference_count(%String* %40, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %41, i32 -1) + %43 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar5__r__) + %44 = call %String* @__quantum__rt__string_concatenate(%String* %42, %String* %43) + call void @__quantum__rt__string_update_reference_count(%String* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %43, i32 -1) + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %46 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + %47 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar6__s__) + %48 = call %String* @__quantum__rt__string_concatenate(%String* %46, %String* %47) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + %49 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %50 = call %String* @__quantum__rt__string_concatenate(%String* %48, %String* %49) + call void 
@__quantum__rt__string_update_reference_count(%String* %48, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %49, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__fail(%String* %50) + unreachable + +continue__1: ; preds = %condContinue__5 + %51 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 0) + %53 = bitcast i8* %52 to i2* + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 1) + %55 = bitcast i8* %54 to i2* + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 2) + %57 = bitcast i8* %56 to i2* + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 3) + %59 = bitcast i8* %58 to i2* + store i2 -1, i2* %53, align 1 + store i2 -1, i2* %55, align 1 + store i2 1, i2* %57, align 1 + store i2 -1, i2* %59, align 1 + %60 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 0) + %62 = bitcast i8* %61 to i2* + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 1) + %64 = bitcast i8* %63 to i2* + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 2) + %66 = bitcast i8* %65 to i2* + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 3) + %68 = bitcast i8* %67 to i2* + store i2 1, i2* %62, align 1 + store i2 1, i2* %64, align 1 + store i2 1, i2* %66, align 1 + store i2 -1, i2* %68, align 1 + %69 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 0) + %71 = bitcast i8* %70 to i2* + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 1) + %73 = bitcast i8* %72 to i2* + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 2) + %75 = bitcast i8* %74 to i2* + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 3) + %77 = bitcast i8* %76 to i2* + store i2 1, i2* %71, align 1 + store i2 -1, i2* %73, align 1 + store i2 -1, i2* %75, align 1 + store i2 -1, i2* %77, align 1 + %78 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 0) + %80 = bitcast i8* %79 to i2* + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 1) + %82 = bitcast i8* %81 to i2* + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 2) + %84 = bitcast i8* %83 to i2* + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 3) + %86 = bitcast i8* %85 to i2* + store i2 -1, i2* %80, align 1 + store 
i2 1, i2* %82, align 1 + store i2 -1, i2* %84, align 1 + store i2 -1, i2* %86, align 1 + %87 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 0) + %89 = bitcast i8* %88 to i2* + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 1) + %91 = bitcast i8* %90 to i2* + %92 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 2) + %93 = bitcast i8* %92 to i2* + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 3) + %95 = bitcast i8* %94 to i2* + store i2 1, i2* %89, align 1 + store i2 -1, i2* %91, align 1 + store i2 1, i2* %93, align 1 + store i2 1, i2* %95, align 1 + %96 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 0) + %98 = bitcast i8* %97 to i2* + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 1) + %100 = bitcast i8* %99 to i2* + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 2) + %102 = bitcast i8* %101 to i2* + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 3) + %104 = bitcast i8* %103 to i2* + store i2 -1, i2* %98, align 1 + store i2 1, i2* %100, align 1 + store i2 1, i2* %102, align 1 + store i2 1, i2* %104, align 1 + %105 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 0) + %107 = bitcast i8* %106 to i2* + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 1) + %109 = bitcast i8* %108 to i2* + %110 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 2) + %111 = bitcast i8* %110 to i2* + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 3) + %113 = bitcast i8* %112 to i2* + store i2 -1, i2* %107, align 1 + store i2 -1, i2* %109, align 1 + store i2 -1, i2* %111, align 1 + store i2 1, i2* %113, align 1 + %114 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 0) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 1) + %118 = bitcast i8* %117 to i2* + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 2) + %120 = bitcast i8* %119 to i2* + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 3) + %122 = bitcast i8* %121 to i2* + store i2 1, i2* %116, align 1 + store i2 1, i2* %118, align 1 + store i2 -1, i2* %120, align 1 + store i2 1, i2* %122, align 1 + %__qsVar10__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 0) + %124 = bitcast i8* %123 to %Array** + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 1) + %126 = bitcast i8* %125 to %Array** + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 2) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 3) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 4) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 5) + %134 = bitcast i8* %133 to %Array** + %135 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 6) + %136 = bitcast i8* %135 to %Array** + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 7) + %138 = bitcast i8* %137 to %Array** + store %Array* %51, %Array** %124, align 8 + store %Array* %60, %Array** %126, align 8 + store %Array* %69, %Array** %128, align 8 + store %Array* %78, %Array** %130, align 8 + store %Array* %87, %Array** %132, align 8 + store %Array* %96, %Array** %134, align 8 + store %Array* %105, %Array** %136, align 8 + store %Array* %114, %Array** %138, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %139 = phi i64 [ 0, %continue__1 ], [ %144, %exiting__1 ] + %140 = icmp sle i64 %139, 7 + br i1 %140, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %141 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %139) + %142 = bitcast i8* %141 to %Array** + %143 = load %Array*, %Array** %142, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %143, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %144 = add i64 %139, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__ops__, i32 1) + %145 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 0) + %147 = bitcast i8* %146 to i64* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 1) + %149 = bitcast i8* %148 to i64* + %150 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 2) + %151 = bitcast i8* %150 to i64* + %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 3) + %153 = bitcast i8* %152 to i64* + store i64 %__qsVar3__p__, i64* %147, align 4 + store i64 %__qsVar4__q__, i64* %149, align 4 + store i64 %__qsVar5__r__, i64* %151, align 4 + store i64 %__qsVar6__s__, i64* %153, align 4 + %154 = call { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %145) + %155 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 0 + %__qsVar11__sortedIndices__ = load %Array*, %Array** %155, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar11__sortedIndices__, i32 1) + %156 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 1 + %__qsVar12__signs__ = load %Array*, %Array** %156, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar12__signs__, i32 1) + %157 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 2 + %__qsVar13__globalSign__ = load double, double* %157, align 8 + %158 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %__qsVar10__ops__, %Array* %__qsVar12__signs__) + %159 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %__qsVar10__ops__, %Array* %__qsVar12__signs__) + %160 = call i64 @__quantum__rt__array_get_size_1d(%Array* %159) + %161 = sub i64 %160, 1 + %162 = insertvalue %Range zeroinitializer, i64 %161, 0 + %163 = insertvalue %Range %162, i64 -1, 1 + %164 = insertvalue %Range %163, i64 0, 2 + %165 = call %Array* @__quantum__rt__array_slice_1d(%Array* %158, %Range %164, i1 true) + %166 = call i64 
@__quantum__rt__array_get_size_1d(%Array* %165) + %167 = sub i64 %166, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %168 = phi i64 [ 0, %exit__1 ], [ %182, %exiting__2 ] + %169 = icmp sle i64 %168, %167 + br i1 %169, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %170 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 %168) + %171 = bitcast i8* %170 to { %Array*, double }** + %172 = load { %Array*, double }*, { %Array*, double }** %171, align 8 + %173 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %172, i32 0, i32 0 + %__qsVar14__op__ = load %Array*, %Array** %173, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar14__op__, i32 1) + %174 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %172, i32 0, i32 1 + %__qsVar15__sign__ = load double, double* %174, align 8 + %175 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %__qsVar16__pauliString__ = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %175, %Array* %__qsVar11__sortedIndices__, %Array* %__qsVar14__op__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 1) + %176 = fmul double %__qsVar13__globalSign__, %__qsVar15__sign__ + %theta = fmul double %176, %__qsVar7__angle__ + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar16__pauliString__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %177 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %178 = bitcast %Tuple* %177 to { %Array*, double, %Array* }* + %179 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %178, i32 0, i32 0 + %180 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %178, i32 0, i32 1 + %181 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %178, i32 0, i32 2 + store %Array* %__qsVar16__pauliString__, %Array** %179, align 8 + store double %theta, double* %180, align 8 + store %Array* %qubits, %Array** %181, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %178) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %177, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar14__op__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar16__pauliString__, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %182 = add i64 
%168, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %183 = phi i64 [ 0, %exit__2 ], [ %188, %exiting__3 ] + %184 = icmp sle i64 %183, 7 + br i1 %184, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %185 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %183) + %186 = bitcast i8* %185 to %Array** + %187 = load %Array*, %Array** %186, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %187, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %188 = add i64 %183, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__ops__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar11__sortedIndices__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar12__signs__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %189 = phi i64 [ 0, %exit__3 ], [ %194, %exiting__4 ] + %190 = icmp sle i64 %189, 7 + br i1 %190, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %191 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %189) + %192 = bitcast i8* %191 to %Array** + %193 = load %Array*, %Array** %192, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %193, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %194 = add i64 %189, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %145, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar11__sortedIndices__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar12__signs__, i32 -1) + %195 = bitcast { %Array*, %Array*, double }* %154 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %195, i32 -1) + %196 = call i64 @__quantum__rt__array_get_size_1d(%Array* %158) + %197 = sub i64 %196, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %198 = phi i64 [ 0, %exit__4 ], [ %206, %exiting__5 ] + %199 = icmp sle i64 %198, %197 + br i1 %199, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %200 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %158, i64 %198) + %201 = bitcast i8* %200 to { %Array*, double }** + %202 = load { %Array*, double }*, { %Array*, double }** %201, align 8 + %203 = getelementptr inbounds { %Array*, double 
}, { %Array*, double }* %202, i32 0, i32 0 + %204 = load %Array*, %Array** %203, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %204, i32 -1) + %205 = bitcast { %Array*, double }* %202 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %205, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %206 = add i64 %198, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %158, i32 -1) + %207 = sub i64 %160, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %208 = phi i64 [ 0, %exit__5 ], [ %216, %exiting__6 ] + %209 = icmp sle i64 %208, %207 + br i1 %209, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %210 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %159, i64 %208) + %211 = bitcast i8* %210 to { %Array*, double }** + %212 = load { %Array*, double }*, { %Array*, double }** %211, align 8 + %213 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %212, i32 0, i32 0 + %214 = load %Array*, %Array** %213, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %214, i32 -1) + %215 = bitcast { %Array*, double }* %212 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %215, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %216 = add i64 %208, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %159, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %165, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQTerm____body({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %coeff = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %8 = bitcast i8* %7 to i64* + %p = load i64, i64* %8, align 4 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, 
i64 1) + %10 = bitcast i8* %9 to i64* + %q = load i64, i64* %10, align 4 + %11 = icmp eq i64 %p, %q + br i1 %11, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([44 x i8], [44 x i8]* @25, i32 0, i32 0)) + %13 = call %String* @__quantum__rt__int_to_string(i64 %p) + %14 = call %String* @__quantum__rt__string_concatenate(%String* %12, %String* %13) + call void @__quantum__rt__string_update_reference_count(%String* %12, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + %15 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %16 = call %String* @__quantum__rt__string_concatenate(%String* %14, %String* %15) + call void @__quantum__rt__string_update_reference_count(%String* %14, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + %17 = call %String* @__quantum__rt__int_to_string(i64 %q) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %20) + unreachable + +continue__1: ; preds = %entry + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %22 = bitcast i8* %21 to double* + %23 = load double, double* %22, align 8 + %24 = fmul double 5.000000e-01, %23 + %angle = fmul double %24, %stepSize + %25 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 0) + %27 = bitcast i8* %26 to i2* + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 1) + %29 = bitcast i8* %28 to i2* + store i2 1, i2* %27, align 1 + store i2 -1, i2* %29, align 1 + %30 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 0) + %32 = bitcast i8* %31 to i2* + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 1) + %34 = bitcast i8* %33 to i2* + store i2 -1, i2* %32, align 1 + store i2 1, i2* %34, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %36 = bitcast i8* %35 to 
%Array** + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %38 = bitcast i8* %37 to %Array** + store %Array* %25, %Array** %36, align 8 + store %Array* %30, %Array** %38, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %39 = phi i64 [ 0, %continue__1 ], [ %44, %exiting__1 ] + %40 = icmp sle i64 %39, 1 + br i1 %40, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %39) + %42 = bitcast i8* %41 to %Array** + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %44 = add i64 %39, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %signs = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %signs, i64 0) + %46 = bitcast i8* %45 to double* + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %signs, i64 1) + %48 = bitcast i8* %47 to double* + store double 1.000000e+00, double* %46, align 8 + store double -1.000000e+00, double* %48, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 1) + %49 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %ops, %Array* %signs) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %52 = phi i64 [ 0, %exit__1 ], [ %60, %exiting__2 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { %Array*, double }** + %56 = load { %Array*, double }*, { %Array*, double }** %55, align 8 + %57 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %56, i32 0, i32 0 + %op = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %58 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %56, i32 0, i32 1 + %sign = load double, double* %58, align 8 + %59 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %pauliString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %59, %Array* %idxFermions, %Array* %op) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + %theta = fmul double %sign, %angle + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %pauliString, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %60 = add i64 %52, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* 
%idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %61 = phi i64 [ 0, %exit__2 ], [ %66, %exiting__3 ] + %62 = icmp sle i64 %61, 1 + br i1 %62, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %61) + %64 = bitcast i8* %63 to %Array** + %65 = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %66 = add i64 %61, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %67 = phi i64 [ 0, %exit__3 ], [ %72, %exiting__4 ] + %68 = icmp sle i64 %67, 1 + br i1 %68, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %67) + %70 = bitcast i8* %69 to %Array** + %71 = load %Array*, %Array** %70, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %71, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %72 = add i64 %67, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %signs, i32 -1) + %73 = sub i64 %50, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %74 = phi i64 [ 0, %exit__4 ], [ %82, %exiting__5 ] + %75 = icmp sle i64 %74, %73 + br i1 %75, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %74) + %77 = bitcast i8* %76 to { %Array*, double }** + %78 = load { %Array*, double }*, { %Array*, double }** %77, align 8 + %79 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %78, i32 0, i32 0 + %80 = load %Array*, %Array** %79, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %80, i32 -1) + %81 = bitcast { %Array*, double }* %78 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %81, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %82 = add i64 %74, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQTerm____adj({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { 
%Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %8 = bitcast i8* %7 to i64* + %__qsVar3__p__ = load i64, i64* %8, align 4 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %10 = bitcast i8* %9 to i64* + %__qsVar4__q__ = load i64, i64* %10, align 4 + %11 = icmp eq i64 %__qsVar3__p__, %__qsVar4__q__ + br i1 %11, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([44 x i8], [44 x i8]* @25, i32 0, i32 0)) + %13 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar3__p__) + %14 = call %String* @__quantum__rt__string_concatenate(%String* %12, %String* %13) + call void @__quantum__rt__string_update_reference_count(%String* %12, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + %15 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %16 = call %String* @__quantum__rt__string_concatenate(%String* %14, %String* %15) + call void @__quantum__rt__string_update_reference_count(%String* %14, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + %17 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar4__q__) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* 
%__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__fail(%String* %20) + unreachable + +continue__1: ; preds = %entry + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %22 = bitcast i8* %21 to double* + %23 = load double, double* %22, align 8 + %24 = fmul double 5.000000e-01, %23 + %__qsVar5__angle__ = fmul double %24, %stepSize + %25 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 0) + %27 = bitcast i8* %26 to i2* + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 1) + %29 = bitcast i8* %28 to i2* + store i2 1, i2* %27, align 1 + store i2 -1, i2* %29, align 1 + %30 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 0) + %32 = bitcast i8* %31 to i2* + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 1) + %34 = bitcast i8* %33 to i2* + store i2 -1, i2* %32, align 1 + store i2 1, i2* %34, align 1 + %__qsVar6__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 0) + %36 = bitcast i8* %35 to %Array** + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 1) + %38 = bitcast i8* %37 to %Array** + store %Array* %25, %Array** %36, align 8 + store %Array* %30, %Array** %38, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %39 = phi i64 [ 0, %continue__1 ], [ %44, %exiting__1 ] + %40 = icmp sle i64 %39, 1 + br i1 %40, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %39) + %42 = bitcast i8* %41 to %Array** + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %44 = add i64 %39, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 1) + %__qsVar7__signs__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar7__signs__, i64 0) + %46 = bitcast i8* %45 to double* + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar7__signs__, i64 1) + %48 = bitcast i8* %47 to double* + store double 1.000000e+00, double* %46, align 8 + store double -1.000000e+00, double* %48, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__signs__, i32 1) + %49 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %__qsVar6__ops__, %Array* %__qsVar7__signs__) + %50 = call %Array* 
@Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %__qsVar6__ops__, %Array* %__qsVar7__signs__) + %51 = call i64 @__quantum__rt__array_get_size_1d(%Array* %50) + %52 = sub i64 %51, 1 + %53 = insertvalue %Range zeroinitializer, i64 %52, 0 + %54 = insertvalue %Range %53, i64 -1, 1 + %55 = insertvalue %Range %54, i64 0, 2 + %56 = call %Array* @__quantum__rt__array_slice_1d(%Array* %49, %Range %55, i1 true) + %57 = call i64 @__quantum__rt__array_get_size_1d(%Array* %56) + %58 = sub i64 %57, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %59 = phi i64 [ 0, %exit__1 ], [ %67, %exiting__2 ] + %60 = icmp sle i64 %59, %58 + br i1 %60, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 %59) + %62 = bitcast i8* %61 to { %Array*, double }** + %63 = load { %Array*, double }*, { %Array*, double }** %62, align 8 + %64 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %63, i32 0, i32 0 + %__qsVar8__op__ = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 1) + %65 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %63, i32 0, i32 1 + %__qsVar9__sign__ = load double, double* %65, align 8 + %66 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %__qsVar10__pauliString__ = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %66, %Array* %__qsVar2__idxFermions__, %Array* %__qsVar8__op__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 1) + %theta = fmul double %__qsVar9__sign__, %__qsVar5__angle__ + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %__qsVar10__pauliString__, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__pauliString__, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %67 = add i64 %59, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %68 = phi i64 [ 0, %exit__2 ], [ %73, %exiting__3 ] + %69 = icmp sle i64 %68, 1 
+ br i1 %69, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %68) + %71 = bitcast i8* %70 to %Array** + %72 = load %Array*, %Array** %71, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %73 = add i64 %68, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__signs__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %74 = phi i64 [ 0, %exit__3 ], [ %79, %exiting__4 ] + %75 = icmp sle i64 %74, 1 + br i1 %75, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %74) + %77 = bitcast i8* %76 to %Array** + %78 = load %Array*, %Array** %77, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %78, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %79 = add i64 %74, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar7__signs__, i32 -1) + %80 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %81 = sub i64 %80, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %82 = phi i64 [ 0, %exit__4 ], [ %90, %exiting__5 ] + %83 = icmp sle i64 %82, %81 + br i1 %83, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %82) + %85 = bitcast i8* %84 to { %Array*, double }** + %86 = load { %Array*, double }*, { %Array*, double }** %85, align 8 + %87 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %86, i32 0, i32 0 + %88 = load %Array*, %Array** %87, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %88, i32 -1) + %89 = bitcast { %Array*, double }* %86 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %89, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %90 = add i64 %82, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + %91 = sub i64 %51, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %92 = phi i64 [ 0, %exit__5 ], [ %100, %exiting__6 ] + %93 = icmp sle i64 %92, %91 + br i1 %93, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 %92) + %95 = bitcast i8* %94 to { %Array*, double }** + %96 = load { %Array*, double }*, { %Array*, double }** %95, align 8 + %97 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %96, i32 0, i32 0 + %98 = load %Array*, %Array** %97, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %98, i32 -1) + %99 = bitcast { %Array*, double }* %96 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %99, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %100 = add i64 %92, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %50, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %56, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQTerm____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %coeff = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %12 = bitcast i8* %11 to i64* + %p = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %14 = bitcast i8* %13 to i64* + %q = load i64, i64* %14, align 4 + %15 = icmp eq i64 %p, %q + br i1 %15, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %16 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([44 x i8], [44 x i8]* @25, i32 0, i32 0)) + %17 = call %String* @__quantum__rt__int_to_string(i64 %p) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %20 = call %String* 
@__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + %21 = call %String* @__quantum__rt__int_to_string(i64 %q) + %22 = call %String* @__quantum__rt__string_concatenate(%String* %20, %String* %21) + call void @__quantum__rt__string_update_reference_count(%String* %20, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %21, i32 -1) + %23 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %24 = call %String* @__quantum__rt__string_concatenate(%String* %22, %String* %23) + call void @__quantum__rt__string_update_reference_count(%String* %22, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %23, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %24) + unreachable + +continue__1: ; preds = %entry + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %26 = bitcast i8* %25 to double* + %27 = load double, double* %26, align 8 + %28 = fmul double 5.000000e-01, %27 + %angle = fmul double %28, %stepSize + %29 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %29, i64 0) + %31 = bitcast i8* %30 to i2* + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %29, i64 1) + %33 = bitcast i8* %32 to i2* + store i2 1, i2* %31, align 1 + store i2 -1, i2* %33, align 1 + %34 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 0) + %36 = bitcast i8* %35 to i2* + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 1) + %38 = bitcast i8* %37 to i2* + store i2 -1, i2* %36, align 1 + store i2 1, i2* %38, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %40 = bitcast i8* %39 to %Array** + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %42 = bitcast i8* %41 to %Array** + store %Array* %29, %Array** %40, align 8 + store %Array* %34, %Array** %42, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %43 = phi i64 [ 0, %continue__1 ], [ %48, %exiting__1 ] + %44 = icmp sle i64 %43, 1 + br i1 %44, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %43) + %46 = bitcast i8* %45 to %Array** + %47 = load %Array*, %Array** %46, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %48 = add i64 %43, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %signs = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %signs, i64 0) + %50 = bitcast i8* %49 to double* + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %signs, i64 1) + %52 = bitcast i8* %51 to double* + store double 1.000000e+00, double* %50, align 8 + store double -1.000000e+00, double* %52, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 1) + %53 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %ops, %Array* %signs) + %54 = call i64 @__quantum__rt__array_get_size_1d(%Array* %53) + %55 = sub i64 %54, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %56 = phi i64 [ 0, %exit__1 ], [ %69, %exiting__2 ] + %57 = icmp sle i64 %56, %55 + br i1 %57, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 %56) + %59 = bitcast i8* %58 to { %Array*, double }** + %60 = load { %Array*, double }*, { %Array*, double }** %59, align 8 + %61 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %60, i32 0, i32 0 + %op = load %Array*, %Array** %61, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %62 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %60, i32 0, i32 1 + %sign = load double, double* %62, align 8 + %63 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %pauliString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %63, %Array* %idxFermions, %Array* %op) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + %theta = fmul double %sign, %angle + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %64 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %65 = bitcast %Tuple* %64 to { %Array*, double, %Array* }* + %66 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %65, i32 0, i32 0 + %67 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %65, i32 0, i32 1 + %68 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %65, i32 0, i32 2 + store %Array* %pauliString, %Array** %66, align 8 + store double %theta, double* %67, align 8 + store %Array* %qubits, %Array** %68, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %65) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* 
%qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %64, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %69 = add i64 %56, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %70 = phi i64 [ 0, %exit__2 ], [ %75, %exiting__3 ] + %71 = icmp sle i64 %70, 1 + br i1 %71, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %70) + %73 = bitcast i8* %72 to %Array** + %74 = load %Array*, %Array** %73, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %74, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %75 = add i64 %70, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %76 = phi i64 [ 0, %exit__3 ], [ %81, %exiting__4 ] + %77 = icmp sle i64 %76, 1 + br i1 %77, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %76) + %79 = bitcast i8* %78 to %Array** + %80 = load %Array*, %Array** %79, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %80, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %81 = add i64 %76, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %signs, i32 -1) + %82 = sub i64 %54, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %83 = phi i64 [ 0, %exit__4 ], [ %91, %exiting__5 ] + %84 = icmp sle i64 %83, %82 + br i1 %84, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 %83) + %86 = bitcast i8* %85 to { %Array*, double }** + %87 = load { %Array*, double }*, { %Array*, double }** %86, align 8 + %88 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %87, i32 0, i32 0 + %89 = load %Array*, %Array** %88, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %89, i32 -1) + %90 = bitcast { %Array*, double }* %87 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %90, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %91 = add i64 %83, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQTerm____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %12 = bitcast i8* %11 to i64* + %__qsVar3__p__ = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %14 = bitcast i8* %13 to i64* + %__qsVar4__q__ = load i64, i64* %14, align 4 + %15 = icmp eq i64 %__qsVar3__p__, %__qsVar4__q__ + br i1 %15, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %16 = call %String* @__quantum__rt__string_create(i8* 
getelementptr inbounds ([44 x i8], [44 x i8]* @25, i32 0, i32 0)) + %17 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar3__p__) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + %21 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar4__q__) + %22 = call %String* @__quantum__rt__string_concatenate(%String* %20, %String* %21) + call void @__quantum__rt__string_update_reference_count(%String* %20, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %21, i32 -1) + %23 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %24 = call %String* @__quantum__rt__string_concatenate(%String* %22, %String* %23) + call void @__quantum__rt__string_update_reference_count(%String* %22, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %23, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__fail(%String* %24) + unreachable + +continue__1: ; preds = %entry + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %26 = bitcast i8* %25 to double* + %27 = load double, double* %26, align 8 + %28 = fmul double 5.000000e-01, %27 + %__qsVar5__angle__ = fmul double %28, %stepSize + %29 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %29, i64 0) + %31 = bitcast i8* %30 to i2* + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %29, i64 1) + %33 = bitcast i8* %32 to i2* + store i2 1, i2* %31, align 1 + store i2 -1, i2* %33, align 1 + %34 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 0) + %36 = bitcast i8* %35 to i2* + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 1) + %38 = bitcast i8* %37 to i2* + store i2 -1, i2* %36, align 1 + store i2 1, i2* %38, align 1 + %__qsVar6__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 0) + %40 = bitcast i8* %39 to 
%Array** + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 1) + %42 = bitcast i8* %41 to %Array** + store %Array* %29, %Array** %40, align 8 + store %Array* %34, %Array** %42, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %43 = phi i64 [ 0, %continue__1 ], [ %48, %exiting__1 ] + %44 = icmp sle i64 %43, 1 + br i1 %44, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %43) + %46 = bitcast i8* %45 to %Array** + %47 = load %Array*, %Array** %46, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %48 = add i64 %43, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 1) + %__qsVar7__signs__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar7__signs__, i64 0) + %50 = bitcast i8* %49 to double* + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar7__signs__, i64 1) + %52 = bitcast i8* %51 to double* + store double 1.000000e+00, double* %50, align 8 + store double -1.000000e+00, double* %52, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__signs__, i32 1) + %53 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %__qsVar6__ops__, %Array* %__qsVar7__signs__) + %54 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %__qsVar6__ops__, %Array* %__qsVar7__signs__) + %55 = call i64 @__quantum__rt__array_get_size_1d(%Array* %54) + %56 = sub i64 %55, 1 + %57 = insertvalue %Range zeroinitializer, i64 %56, 0 + %58 = insertvalue %Range %57, i64 -1, 1 + %59 = insertvalue %Range %58, i64 0, 2 + %60 = call %Array* @__quantum__rt__array_slice_1d(%Array* %53, %Range %59, i1 true) + %61 = call i64 @__quantum__rt__array_get_size_1d(%Array* %60) + %62 = sub i64 %61, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %63 = phi i64 [ 0, %exit__1 ], [ %76, %exiting__2 ] + %64 = icmp sle i64 %63, %62 + br i1 %64, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 %63) + %66 = bitcast i8* %65 to { %Array*, double }** + %67 = load { %Array*, double }*, { %Array*, double }** %66, align 8 + %68 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %67, i32 0, i32 0 + %__qsVar8__op__ = load %Array*, %Array** %68, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 1) + %69 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %67, i32 0, i32 1 + %__qsVar9__sign__ = load double, double* %69, align 8 + %70 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %__qsVar10__pauliString__ = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %70, %Array* %__qsVar2__idxFermions__, %Array* %__qsVar8__op__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 1) + %theta = fmul double %__qsVar9__sign__, 
%__qsVar5__angle__ + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__pauliString__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %71 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %72 = bitcast %Tuple* %71 to { %Array*, double, %Array* }* + %73 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %72, i32 0, i32 0 + %74 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %72, i32 0, i32 1 + %75 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %72, i32 0, i32 2 + store %Array* %__qsVar10__pauliString__, %Array** %73, align 8 + store double %theta, double* %74, align 8 + store %Array* %qubits, %Array** %75, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %72) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %71, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__pauliString__, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %76 = add i64 %63, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %77 = phi i64 [ 0, %exit__2 ], [ %82, %exiting__3 ] + %78 = icmp sle i64 %77, 1 + br i1 %78, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %77) + %80 = bitcast i8* %79 to %Array** + %81 = load %Array*, %Array** %80, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %82 = add i64 %77, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__signs__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %83 = phi i64 [ 0, %exit__3 ], [ %88, %exiting__4 ] + %84 = icmp sle i64 %83, 1 + br i1 %84, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %83) + %86 = bitcast i8* %85 to %Array** + %87 = load %Array*, %Array** %86, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %87, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %88 = add i64 %83, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar7__signs__, i32 -1) + %89 = call i64 @__quantum__rt__array_get_size_1d(%Array* %53) + %90 = sub i64 %89, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %91 = phi i64 [ 0, %exit__4 ], [ %99, %exiting__5 ] + %92 = icmp sle i64 %91, %90 + br i1 %92, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 %91) + %94 = bitcast i8* %93 to { %Array*, double }** + %95 = load { %Array*, double }*, { %Array*, double }** %94, align 8 + %96 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %95, i32 0, i32 0 + %97 = load %Array*, %Array** %96, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %97, i32 -1) + %98 = bitcast { %Array*, double }* %95 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %98, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %99 = add i64 %91, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1) + %100 = sub i64 %55, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %101 = phi i64 [ 0, %exit__5 ], [ %109, %exiting__6 ] + %102 = icmp sle i64 %101, %100 + br i1 %102, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %54, i64 %101) + %104 = bitcast i8* %103 to { %Array*, double }** + %105 = load { %Array*, double }*, { %Array*, double }** %104, align 8 + %106 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %105, i32 0, i32 0 + %107 = load %Array*, %Array** %106, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %107, i32 -1) + %108 = bitcast { %Array*, double }* %105 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %108, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %109 = add i64 %101, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %54, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + ret void +} + +define internal { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorFunction____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 
+ %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %9 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %13 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %12, i32 0, i32 1 + store %Callable* %10, %Callable** %13, align 8 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %14, align 8 + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__36__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__21__FunctionTable, %Tuple* %11) + %16 = call { %Callable* }* @Microsoft__Quantum__Simulation__EvolutionUnitary__body(%Callable* %15) + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret { %Callable* }* %16 +} + +define internal void @Lifted__PartialApplication__36__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, 
%Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %2 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %4 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 0 + %5 = load double, double* %4, align 8 + %6 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %2, { { %Array*, %Array* }*, %Array* }** %10, align 8 + store double %5, double* %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__36__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %2 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %4 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 0 + %5 = load double, double* %4, align 8 + %6 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, 
%Array* }*, double, %Array* }* %9, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %2, { { %Array*, %Array* }*, %Array* }** %10, align 8 + store double %5, double* %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__36__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 1 + %7 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %6, align 8 + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %9 = load double, double* %8, align 8 + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %7, { { %Array*, %Array* }*, %Array* }** %14, align 8 + store double %9, double* %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* getelementptr ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* null, i32 1) to i64)) + %18 = 
bitcast %Tuple* %17 to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__36__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 1 + %7 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %6, align 8 + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %9 = load double, double* %8, align 8 + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, 
i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %7, { { %Array*, %Array* }*, %Array* }** %14, align 8 + store double %9, double* %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* getelementptr ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %23) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____body({ { %Array*, %Array* }*, %Array* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Array*, %Array* }*, %Array* 
}*, double, %Array* }* + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____adj({ { %Array*, %Array* }*, %Array* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Array*, %Array* }*, %Array* }*, double, %Array* }*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____ctl(%Array* %3, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Array*, %Array* }*, %Array* }*, double, %Array* }*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____ctladj(%Array* %3, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__21__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %3, align 8 + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 0 + %6 = load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 %count-change) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 %count-change) + %11 = bitcast { %Array*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 %count-change) + %12 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 %count-change) + %14 = bitcast { { %Array*, %Array* }*, %Array* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__21__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %3, align 8 + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 0 + %6 = load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 %count-change) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 %count-change) + %11 = bitcast { %Array*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 %count-change) + %12 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 %count-change) + %14 = bitcast { { %Array*, %Array* }*, %Array* }* %4 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %idxDoubles = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxTermType, i64 0) + %8 = bitcast i8* %7 to i64* + %termType = load i64, i64* %8, align 4 + %9 = icmp eq i64 %termType, 0 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQTerm____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +test1__1: ; preds = %entry + %10 = icmp eq i64 %termType, 2 + br i1 %10, label %then1__1, label %continue__1 + +then1__1: ; preds = %test1__1 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQRSTerm____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +continue__1: ; preds = %then1__1, %test1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 
-1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____adj({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %__qsVar1__idxDoubles__ = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__idxTermType__, i64 0) + %8 = bitcast i8* %7 to i64* + %__qsVar3__termType__ = load i64, i64* %8, align 4 + %9 = icmp eq i64 %__qsVar3__termType__, 0 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQTerm____adj({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +test1__1: ; preds = %entry + %10 = icmp eq i64 %__qsVar3__termType__, 2 + br i1 %10, label %then1__1, label %continue__1 + +then1__1: ; preds = %test1__1 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQRSTerm____adj({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +continue__1: ; preds = %then1__1, %test1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %idxDoubles = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxTermType, i64 0) + %12 = bitcast i8* %11 to i64* + %termType = load i64, i64* %12, align 4 + %13 = icmp eq i64 %termType, 0 + br i1 %13, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, 
double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %16, align 8 + store double %stepSize, double* %17, align 8 + store %Array* %qubits, %Array** %18, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQTerm____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %19 = icmp eq i64 %termType, 2 + br i1 %19, label %then1__1, label %continue__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %22 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %22, align 8 + store double %stepSize, double* %23, align 8 + store %Array* %qubits, %Array** %24, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQRSTerm____ctl(%Array* %__controlQubits__, { { { %Array*, 
%Array* }*, %Array* }*, double, %Array* }* %21) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then1__1, %test1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__idxDoubles__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load 
double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__idxTermType__, i64 0) + %12 = bitcast i8* %11 to i64* + %__qsVar3__termType__ = load i64, i64* %12, align 4 + %13 = icmp eq i64 %__qsVar3__termType__, 0 + br i1 %13, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %16, align 8 + store double %stepSize, double* %17, align 8 + store %Array* %qubits, %Array** %18, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQTerm____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %19 = icmp eq i64 %__qsVar3__termType__, 2 + br i1 %19, label %then1__1, label %continue__1 + +then1__1: ; preds = %test1__1 + call void 
@__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %22 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %22, align 8 + store double %stepSize, double* %23, align 8 + store %Array* %qubits, %Array** %24, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQRSTerm____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then1__1, %test1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___PrepareSingleConfigurationalStateSingleSiteOccupation____body(%Array* %qubitIndices) { 
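+; Returns PrepareSingleConfigurationalStateSingleSiteOccupation partially applied over the captured qubitIndices (via PartialApplication__37, counts managed by MemoryManagement__22).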
+entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitIndices, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Array* }* + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %2, i32 0, i32 1 + store %Callable* %0, %Callable** %3, align 8 + store %Array* %qubitIndices, %Array** %4, align 8 + %5 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__37__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__22__FunctionTable, %Tuple* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + ret %Callable* %5 +} + +define internal void @Lifted__PartialApplication__37__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__37__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, 
%Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__37__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__37__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = 
bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, 
%Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__adj(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctl(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctladj(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__22__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__22__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + 
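+; Finally, propagate the alias-count change to the capture tuple itself.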
call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body(%Array* %qubitIndices, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___86c36a8a845246bfb23f44646c7e9d24_Subarray__body(%Array* %qubitIndices, %Array* %qubits) + call void @Microsoft__Quantum__Canon___c2c9de63210e4d19860498010c1645f5_ApplyToEachCA__body(%Callable* %0, %Array* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__adj(%Array* %qubitIndices, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___86c36a8a845246bfb23f44646c7e9d24_Subarray__body(%Array* %qubitIndices, %Array* %qubits) + call void @Microsoft__Quantum__Canon___c2c9de63210e4d19860498010c1645f5_ApplyToEachCA__adj(%Callable* %0, %Array* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctl(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %qubitIndices = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Array* @Microsoft__Quantum__Arrays___86c36a8a845246bfb23f44646c7e9d24_Subarray__body(%Array* 
%qubitIndices, %Array* %qubits) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + store %Callable* %3, %Callable** %7, align 8 + store %Array* %4, %Array** %8, align 8 + call void @Microsoft__Quantum__Canon___c2c9de63210e4d19860498010c1645f5_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %6) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctladj(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %qubitIndices = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Array* @Microsoft__Quantum__Arrays___86c36a8a845246bfb23f44646c7e9d24_Subarray__body(%Array* %qubitIndices, %Array* %qubits) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + store %Callable* %3, %Callable** %7, align 8 + store %Array* %4, %Array** %8, align 8 + call void @Microsoft__Quantum__Canon___c2c9de63210e4d19860498010c1645f5_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Array* }* %6) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3__JordanWignerStateAsGeneratorIndex____body(%Array* %data, i64 %idx) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %data) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %2) + %5 = bitcast i8* %4 to { { double, double }*, %Array* }** + %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0 + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %idx) + %15 = bitcast i8* %14 to { { double, double }*, %Array* }** + %16 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %15, align 8 + %17 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %16, i32 0, i32 0 + %18 = load { double, double }*, { double, double }** %17, align 8 + %19 = getelementptr inbounds { double, double }, { double, double }* %18, i32 0, i32 0 + %real = load double, double* %19, align 8 + %20 = getelementptr inbounds { double, double }, { double, double }* %18, i32 0, i32 1 + %imaginary = load double, double* %20, align 8 + %21 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %16, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %22 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions) + %23 = icmp eq i64 %22, 2 + br i1 %23, label %then0__1, label %test1__1 + +then0__1: ; preds = %exit__1 + %24 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %24, i64 0) + %26 = bitcast i8* %25 to i64* + store i64 0, i64* %26, align 4 + %27 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %27, i64 0) + %29 = bitcast i8* %28 to double* + store double %real, double* %29, align 8 + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %31 = bitcast %Tuple* %30 to { %Array*, %Array* }* + %32 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %31, i32 0, 
i32 0 + %33 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %31, i32 0, i32 1 + store %Array* %24, %Array** %32, align 8 + store %Array* %27, %Array** %33, align 8 + %34 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %31, %Array* %idxFermions) + %35 = sub i64 %0, 1 + br label %header__2 + +test1__1: ; preds = %exit__1 + %36 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions) + %37 = icmp eq i64 %36, 4 + br i1 %37, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %38 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 0) + %40 = bitcast i8* %39 to i64* + store i64 2, i64* %40, align 4 + %41 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %41, i64 0) + %43 = bitcast i8* %42 to double* + store double %real, double* %43, align 8 + %44 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %45 = bitcast %Tuple* %44 to { %Array*, %Array* }* + %46 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %45, i32 0, i32 0 + %47 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %45, i32 0, i32 1 + store %Array* %38, %Array** %46, align 8 + store %Array* %41, %Array** %47, align 8 + %48 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %45, %Array* %idxFermions) + %49 = sub i64 %0, 1 + br label %header__3 + +else__1: ; preds = %test1__1 + %50 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 0) + %52 = bitcast i8* %51 to i64* + store i64 -1, i64* %52, align 4 + %53 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 0) + %55 = bitcast i8* %54 to double* + store double 0.000000e+00, double* %55, align 8 + %56 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %57 = bitcast %Tuple* %56 to { %Array*, %Array* }* + %58 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %57, i32 0, i32 0 + %59 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %57, i32 0, i32 1 + store %Array* %50, %Array** %58, align 8 + store %Array* %53, %Array** %59, align 8 + %60 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 0) + %62 = bitcast i8* %61 to i64* + store i64 0, i64* %62, align 4 + %63 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %57, %Array* %60) + %64 = sub i64 %0, 1 + br label %header__4 + +continue__1: ; No predecessors! 
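+   ; dead merge block: every branch of the term-length conditional above returns from its own loop-exit block, so control never reaches this point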
+ unreachable + +header__2: ; preds = %exiting__2, %then0__1 + %65 = phi i64 [ 0, %then0__1 ], [ %76, %exiting__2 ] + %66 = icmp sle i64 %65, %35 + br i1 %66, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %65) + %68 = bitcast i8* %67 to { { double, double }*, %Array* }** + %69 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %68, align 8 + %70 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %69, i32 0, i32 0 + %71 = load { double, double }*, { double, double }** %70, align 8 + %72 = bitcast { double, double }* %71 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %72, i32 -1) + %73 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %69, i32 0, i32 1 + %74 = load %Array*, %Array** %73, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %74, i32 -1) + %75 = bitcast { { double, double }*, %Array* }* %69 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %75, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %76 = add i64 %65, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %34 + +header__3: ; preds = %exiting__3, %then1__1 + %77 = phi i64 [ 0, %then1__1 ], [ %88, %exiting__3 ] + %78 = icmp sle i64 %77, %49 + br i1 %78, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %77) + %80 = bitcast i8* %79 to { { double, double }*, %Array* }** + %81 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %80, align 8 + %82 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %81, i32 0, i32 0 + %83 = load { double, double }*, { double, double }** %82, align 8 + %84 = bitcast { double, double }* %83 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %84, i32 -1) + %85 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %81, i32 0, i32 1 + %86 = load %Array*, %Array** %85, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %86, i32 -1) + %87 = bitcast { { double, double }*, %Array* }* %81 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %87, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %88 = add i64 %77, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %38, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %44, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %48 + +header__4: ; preds = %exiting__4, %else__1 + %89 = phi i64 [ 0, %else__1 ], [ %100, %exiting__4 ] + %90 = icmp sle i64 %89, 
%64 + br i1 %90, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %89) + %92 = bitcast i8* %91 to { { double, double }*, %Array* }** + %93 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %92, align 8 + %94 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %93, i32 0, i32 0 + %95 = load { double, double }*, { double, double }** %94, align 8 + %96 = bitcast { double, double }* %95 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %96, i32 -1) + %97 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %93, i32 0, i32 1 + %98 = load %Array*, %Array** %97, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %98, i32 -1) + %99 = bitcast { { double, double }*, %Array* }* %93 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %99, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %100 = add i64 %89, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %50, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %63 +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerBitString__body(i64 %nFermions, %Array* %idxFermions) { +entry: + %zString = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions) + %1 = srem i64 %0, 2 + %2 = icmp ne i64 %1, 0 + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([86 x i8], [86 x i8]* @26, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %3) + unreachable + +continue__1: ; preds = %entry + %4 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %nFermions) + %5 = sub i64 %nFermions, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %6 = phi i64 [ 0, %continue__1 ], [ %10, %exiting__1 ] + %7 = icmp sle i64 %6, %5 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %6) + %9 = bitcast i8* %8 to i1* + store i1 false, i1* %9, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %6, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %4, %Array** %zString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions) + %12 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %24, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%idxFermions, i64 %13) + %16 = bitcast i8* %15 to i64* + %fermionIdx = load i64, i64* %16, align 4 + %17 = icmp sge i64 %fermionIdx, %nFermions + br i1 %17, label %then0__2, label %continue__2 + +then0__2: ; preds = %body__2 + %18 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @27, i32 0, i32 0)) + %19 = call %String* @__quantum__rt__int_to_string(i64 %fermionIdx) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + %21 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @28, i32 0, i32 0)) + %22 = call %String* @__quantum__rt__string_concatenate(%String* %20, %String* %21) + call void @__quantum__rt__string_update_reference_count(%String* %20, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %21, i32 -1) + %23 = load %Array*, %Array** %zString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__fail(%String* %22) + unreachable + +continue__2: ; preds = %body__2 + br label %header__3 + +exiting__2: ; preds = %exit__3 + %24 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %25 = sub i64 %11, 1 + br label %header__4 + +header__3: ; preds = %exiting__3, %continue__2 + %idx = phi i64 [ 0, %continue__2 ], [ %35, %exiting__3 ] + %26 = icmp sle i64 %idx, %fermionIdx + br i1 %26, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %27 = load %Array*, %Array** %zString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1) + %28 = call %Array* @__quantum__rt__array_copy(%Array* %27, i1 false) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %27, i64 %idx) + %30 = bitcast i8* %29 to i1* + %31 = load i1, i1* %30, align 1 + %32 = xor i1 %31, true + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 %idx) + %34 = bitcast i8* %33 to i1* + store i1 %32, i1* %34, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + store %Array* %28, %Array** %zString, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %idx, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + br label %exiting__2 + +header__4: ; preds = %exiting__4, %exit__2 + %36 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__4 ] + %37 = icmp sle i64 %36, %25 + br i1 %37, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 %36) + %39 = bitcast i8* %38 to i64* + %fermionIdx__1 = load i64, i64* %39, align 4 + %40 = load %Array*, %Array** %zString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %40, i32 -1) + %41 = call %Array* @__quantum__rt__array_copy(%Array* %40, i1 false) + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %41, i64 %fermionIdx__1) + %43 = bitcast i8* %42 to i1* + store i1 false, i1* %43, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1) + store %Array* %41, %Array** %zString, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %40, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %44 = add i64 %36, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + %45 = load %Array*, %Array** %zString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %45, i32 -1) + ret %Array* %45 +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliZString__body(i64 %nFermions, %Array* %idxFermions) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %bitString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerBitString__body(i64 %nFermions, %Array* %idxFermions) + call void @__quantum__rt__array_update_alias_count(%Array* %bitString, i32 1) + %0 = call %Array* @Microsoft__Quantum__Convert__BoolArrayAsPauli__body(i2 -2, i1 true, %Array* %bitString) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bitString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bitString, i32 -1) + ret %Array* %0 +} + +define internal { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerClusterOperatorEvolutionSet__body() { +entry: + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorFunction____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call { %Callable* }* @Microsoft__Quantum__Simulation__EvolutionSet__body(%Callable* %0) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + ret { %Callable* }* %1 +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorFunction____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { %Array*, %Array* }*, %Array* }* + %1 = call { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorFunction____body({ { %Array*, %Array* }*, %Array* }* %0) + %2 = bitcast %Tuple* %result-tuple to { { %Callable* }* }* + %3 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %2, i32 0, i32 0 + store { %Callable* }* %1, { %Callable* }** %3, align 8 + ret void +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerClusterOperatorGeneratorSystem__body(%Array* %data) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %data) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %2) + %5 = bitcast i8* %4 to { { double, double }*, %Array* }** + %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0 + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to 
%Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + %14 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3__JordanWignerStateAsGeneratorIndex____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %16) + %19 = bitcast i8* %18 to { { double, double }*, %Array* }** + %20 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %19, align 8 + %21 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %20, i32 0, i32 0 + %22 = load { double, double }*, { double, double }** %21, align 8 + %23 = bitcast { double, double }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 1) + %24 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %20, i32 0, i32 1 + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 1) + %26 = bitcast { { double, double }*, %Array* }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %data, i32 1) + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { %Callable*, %Array* }* + %30 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %29, i32 0, i32 1 + store %Callable* %14, %Callable** %30, align 8 + store %Array* %data, %Array** %31, align 8 + %32 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__38__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__23__FunctionTable, %Tuple* %28) + %33 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 %0, %Callable* %32) + %34 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %46, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %35) + %38 = bitcast i8* %37 to { { double, double }*, %Array* 
}** + %39 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %38, align 8 + %40 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %39, i32 0, i32 0 + %41 = load { double, double }*, { double, double }** %40, align 8 + %42 = bitcast { double, double }* %41 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 -1) + %43 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %39, i32 0, i32 1 + %44 = load %Array*, %Array** %43, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 -1) + %45 = bitcast { { double, double }*, %Array* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %46 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %32, i32 -1) + ret { i64, %Callable* }* %33 +} + +define internal void @Lifted__PartialApplication__38__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { i64 }* + %4 = getelementptr inbounds { i64 }, { i64 }* %3, i32 0, i32 0 + %5 = load i64, i64* %4, align 4 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, i64 }* getelementptr ({ %Array*, i64 }, { %Array*, i64 }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, i64 }* + %8 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store i64 %5, i64* %9, align 4 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3__JordanWignerStateAsGeneratorIndex____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, i64 }* + %1 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load i64, i64* %2, align 4 + %5 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3__JordanWignerStateAsGeneratorIndex____body(%Array* %3, i64 %4) + %6 = bitcast %Tuple* %result-tuple to { { { %Array*, %Array* }*, %Array* }* }* + %7 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %6, i32 0, i32 0 + store { { %Array*, %Array* }*, %Array* }* %5, { { %Array*, %Array* }*, %Array* }** %7, align 8 + ret void +} + +define internal void @MemoryManagement__23__RefCount(%Tuple* %capture-tuple, i32 
%count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { { double, double }*, %Array* }** + %11 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 0 + %13 = load { double, double }*, { double, double }** %12, align 8 + %14 = bitcast { double, double }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change) + %15 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 1 + %16 = load %Array*, %Array** %15, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 %count-change) + %17 = bitcast { { double, double }*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__23__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { { double, double }*, %Array* }** + %11 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 0 + %13 = load { double, double }*, { double, double }** %12, align 8 + %14 = bitcast { double, 
double }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change) + %15 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 1 + %16 = load %Array*, %Array** %15, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %16, i32 %count-change) + %17 = bitcast { { double, double }*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerGeneratorSystem__body({ %Array*, %Array*, %Array*, %Array* }* %data) { +entry: + %0 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 0 + %ZData = load %Array*, %Array** %0, align 8 + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ZData) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %3) + %6 = bitcast i8* %5 to { %Array*, %Array* }** + %7 = load { %Array*, %Array* }*, { %Array*, %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { %Array*, %Array* }* %7 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 1) + %14 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 1 + %ZZData = load %Array*, %Array** %14, align 8 + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ZZData) + %16 = sub i64 %15, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %17 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %17) + %20 = bitcast i8* %19 to { %Array*, %Array* }** + %21 = load { %Array*, %Array* }*, { %Array*, %Array* }** %20, align 8 + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 0 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %24 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 1 + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* 
%25, i32 1) + %26 = bitcast { %Array*, %Array* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %17, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 1) + %28 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 2 + %PQandPQQRData = load %Array*, %Array** %28, align 8 + %29 = call i64 @__quantum__rt__array_get_size_1d(%Array* %PQandPQQRData) + %30 = sub i64 %29, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %31 = phi i64 [ 0, %exit__2 ], [ %41, %exiting__3 ] + %32 = icmp sle i64 %31, %30 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %31) + %34 = bitcast i8* %33 to { %Array*, %Array* }** + %35 = load { %Array*, %Array* }*, { %Array*, %Array* }** %34, align 8 + %36 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %35, i32 0, i32 0 + %37 = load %Array*, %Array** %36, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %37, i32 1) + %38 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %35, i32 0, i32 1 + %39 = load %Array*, %Array** %38, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %39, i32 1) + %40 = bitcast { %Array*, %Array* }* %35 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %41 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %PQandPQQRData, i32 1) + %42 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 3 + %h0123Data = load %Array*, %Array** %42, align 8 + %43 = call i64 @__quantum__rt__array_get_size_1d(%Array* %h0123Data) + %44 = sub i64 %43, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %45 = phi i64 [ 0, %exit__3 ], [ %55, %exiting__4 ] + %46 = icmp sle i64 %45, %44 + br i1 %46, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %45) + %48 = bitcast i8* %47 to { %Array*, %Array* }** + %49 = load { %Array*, %Array* }*, { %Array*, %Array* }** %48, align 8 + %50 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %49, i32 0, i32 0 + %51 = load %Array*, %Array** %50, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %51, i32 1) + %52 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %49, i32 0, i32 1 + %53 = load %Array*, %Array** %52, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %53, i32 1) + %54 = bitcast { %Array*, %Array* }* %49 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %54, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %55 = add i64 %45, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 1) + %56 = bitcast { %Array*, %Array*, %Array*, %Array* }* %data to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 1) + %57 = sub i64 %1, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + 
%58 = phi i64 [ 0, %exit__4 ], [ %68, %exiting__5 ] + %59 = icmp sle i64 %58, %57 + br i1 %59, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %58) + %61 = bitcast i8* %60 to { %Array*, %Array* }** + %62 = load { %Array*, %Array* }*, { %Array*, %Array* }** %61, align 8 + %63 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %62, i32 0, i32 0 + %64 = load %Array*, %Array** %63, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + %65 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %62, i32 0, i32 1 + %66 = load %Array*, %Array** %65, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 1) + %67 = bitcast { %Array*, %Array* }* %62 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %67, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %68 = add i64 %58, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 1) + %69 = sub i64 %15, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %70 = phi i64 [ 0, %exit__5 ], [ %80, %exiting__6 ] + %71 = icmp sle i64 %70, %69 + br i1 %71, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %70) + %73 = bitcast i8* %72 to { %Array*, %Array* }** + %74 = load { %Array*, %Array* }*, { %Array*, %Array* }** %73, align 8 + %75 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %74, i32 0, i32 0 + %76 = load %Array*, %Array** %75, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 1) + %77 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %74, i32 0, i32 1 + %78 = load %Array*, %Array** %77, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %78, i32 1) + %79 = bitcast { %Array*, %Array* }* %74 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %80 = add i64 %70, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 1) + %81 = sub i64 %29, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %82 = phi i64 [ 0, %exit__6 ], [ %92, %exiting__7 ] + %83 = icmp sle i64 %82, %81 + br i1 %83, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %82) + %85 = bitcast i8* %84 to { %Array*, %Array* }** + %86 = load { %Array*, %Array* }*, { %Array*, %Array* }** %85, align 8 + %87 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %86, i32 0, i32 0 + %88 = load %Array*, %Array** %87, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %88, i32 1) + %89 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %86, i32 0, i32 1 + %90 = load %Array*, %Array** %89, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %90, i32 1) + %91 = bitcast { %Array*, %Array* }* %86 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %91, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %92 = add i64 %82, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* 
%PQandPQQRData, i32 1) + %93 = sub i64 %43, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %94 = phi i64 [ 0, %exit__7 ], [ %104, %exiting__8 ] + %95 = icmp sle i64 %94, %93 + br i1 %95, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %94) + %97 = bitcast i8* %96 to { %Array*, %Array* }** + %98 = load { %Array*, %Array* }*, { %Array*, %Array* }** %97, align 8 + %99 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %98, i32 0, i32 0 + %100 = load %Array*, %Array** %99, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %100, i32 1) + %101 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %98, i32 0, i32 1 + %102 = load %Array*, %Array** %101, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %102, i32 1) + %103 = bitcast { %Array*, %Array* }* %98 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %103, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %104 = add i64 %94, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 1) + %105 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 0) + %107 = bitcast i8* %106 to i64* + store i64 0, i64* %107, align 4 + %ZGenSys = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %ZData, %Array* %105) + %108 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %ZGenSys, i32 0, i32 1 + %109 = load %Callable*, %Callable** %108, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %109, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %109, i32 1) + %110 = bitcast { i64, %Callable* }* %ZGenSys to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %110, i32 1) + %111 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %111, i64 0) + %113 = bitcast i8* %112 to i64* + store i64 1, i64* %113, align 4 + %ZZGenSys = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %ZZData, %Array* %111) + %114 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %ZZGenSys, i32 0, i32 1 + %115 = load %Callable*, %Callable** %114, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %115, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %115, i32 1) + %116 = bitcast { i64, %Callable* }* %ZZGenSys to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %116, i32 1) + %117 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %117, i64 0) + %119 = bitcast i8* %118 to i64* + store i64 2, i64* %119, align 4 + %PQandPQQRGenSys = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %PQandPQQRData, %Array* %117) + %120 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %PQandPQQRGenSys, i32 0, i32 1 + %121 = load %Callable*, %Callable** %120, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %121, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %121, i32 1) + %122 = bitcast { i64, %Callable* }* %PQandPQQRGenSys to %Tuple* + 
call void @__quantum__rt__tuple_update_alias_count(%Tuple* %122, i32 1) + %123 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %124 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %123, i64 0) + %125 = bitcast i8* %124 to i64* + store i64 3, i64* %125, align 4 + %h0123GenSys = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %h0123Data, %Array* %123) + %126 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %h0123GenSys, i32 0, i32 1 + %127 = load %Callable*, %Callable** %126, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %127, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %127, i32 1) + %128 = bitcast { i64, %Callable* }* %h0123GenSys to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %128, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %109, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %109, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %110, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %115, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %115, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %116, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %121, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %121, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %122, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %127, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %127, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %128, i32 1) + %129 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %130 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 0) + %131 = bitcast i8* %130 to { i64, %Callable* }** + %132 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 1) + %133 = bitcast i8* %132 to { i64, %Callable* }** + %134 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 2) + %135 = bitcast i8* %134 to { i64, %Callable* }** + %136 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 3) + %137 = bitcast i8* %136 to { i64, %Callable* }** + store { i64, %Callable* }* %ZGenSys, { i64, %Callable* }** %131, align 8 + store { i64, %Callable* }* %ZZGenSys, { i64, %Callable* }** %133, align 8 + store { i64, %Callable* }* %PQandPQQRGenSys, { i64, %Callable* }** %135, align 8 + store { i64, %Callable* }* %h0123GenSys, { i64, %Callable* }** %137, align 8 + %138 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__SumGeneratorSystems__body(%Array* %129) + %139 = sub i64 %1, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %140 = phi i64 [ 0, %exit__8 ], [ %150, %exiting__9 ] + %141 = icmp sle i64 %140, %139 + br i1 %141, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %140) + %143 = bitcast i8* %142 to { %Array*, %Array* }** + %144 = load { %Array*, %Array* }*, { %Array*, %Array* }** %143, align 8 + %145 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %144, i32 0, i32 0 + %146 = load %Array*, %Array** %145, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %146, i32 -1) + %147 = 
getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %144, i32 0, i32 1 + %148 = load %Array*, %Array** %147, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %148, i32 -1) + %149 = bitcast { %Array*, %Array* }* %144 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %149, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %150 = add i64 %140, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 -1) + %151 = sub i64 %15, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %152 = phi i64 [ 0, %exit__9 ], [ %162, %exiting__10 ] + %153 = icmp sle i64 %152, %151 + br i1 %153, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %154 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %152) + %155 = bitcast i8* %154 to { %Array*, %Array* }** + %156 = load { %Array*, %Array* }*, { %Array*, %Array* }** %155, align 8 + %157 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %156, i32 0, i32 0 + %158 = load %Array*, %Array** %157, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %158, i32 -1) + %159 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %156, i32 0, i32 1 + %160 = load %Array*, %Array** %159, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %160, i32 -1) + %161 = bitcast { %Array*, %Array* }* %156 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %161, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %162 = add i64 %152, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 -1) + %163 = sub i64 %29, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %164 = phi i64 [ 0, %exit__10 ], [ %174, %exiting__11 ] + %165 = icmp sle i64 %164, %163 + br i1 %165, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %166 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %164) + %167 = bitcast i8* %166 to { %Array*, %Array* }** + %168 = load { %Array*, %Array* }*, { %Array*, %Array* }** %167, align 8 + %169 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %168, i32 0, i32 0 + %170 = load %Array*, %Array** %169, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %170, i32 -1) + %171 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %168, i32 0, i32 1 + %172 = load %Array*, %Array** %171, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %172, i32 -1) + %173 = bitcast { %Array*, %Array* }* %168 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %173, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %174 = add i64 %164, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %PQandPQQRData, i32 -1) + %175 = sub i64 %43, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %176 = phi i64 [ 0, %exit__11 ], [ %186, %exiting__12 ] + %177 = icmp sle i64 %176, %175 + br i1 %177, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %178 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %176) + %179 = bitcast i8* %178 to { %Array*, %Array* }** + %180 = load { %Array*, %Array* }*, { %Array*, 
%Array* }** %179, align 8 + %181 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %180, i32 0, i32 0 + %182 = load %Array*, %Array** %181, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %182, i32 -1) + %183 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %180, i32 0, i32 1 + %184 = load %Array*, %Array** %183, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %184, i32 -1) + %185 = bitcast { %Array*, %Array* }* %180 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %185, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %186 = add i64 %176, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 -1) + %187 = sub i64 %1, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %188 = phi i64 [ 0, %exit__12 ], [ %198, %exiting__13 ] + %189 = icmp sle i64 %188, %187 + br i1 %189, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %190 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %188) + %191 = bitcast i8* %190 to { %Array*, %Array* }** + %192 = load { %Array*, %Array* }*, { %Array*, %Array* }** %191, align 8 + %193 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %192, i32 0, i32 0 + %194 = load %Array*, %Array** %193, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %194, i32 -1) + %195 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %192, i32 0, i32 1 + %196 = load %Array*, %Array** %195, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %196, i32 -1) + %197 = bitcast { %Array*, %Array* }* %192 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %197, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %198 = add i64 %188, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 -1) + %199 = sub i64 %15, 1 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %200 = phi i64 [ 0, %exit__13 ], [ %210, %exiting__14 ] + %201 = icmp sle i64 %200, %199 + br i1 %201, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %202 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %200) + %203 = bitcast i8* %202 to { %Array*, %Array* }** + %204 = load { %Array*, %Array* }*, { %Array*, %Array* }** %203, align 8 + %205 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %204, i32 0, i32 0 + %206 = load %Array*, %Array** %205, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %206, i32 -1) + %207 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %204, i32 0, i32 1 + %208 = load %Array*, %Array** %207, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %208, i32 -1) + %209 = bitcast { %Array*, %Array* }* %204 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %209, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %210 = add i64 %200, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 -1) + %211 = sub i64 %29, 1 + br label %header__15 + +header__15: ; preds = %exiting__15, %exit__14 + %212 = phi i64 [ 0, %exit__14 ], [ %222, 
%exiting__15 ] + %213 = icmp sle i64 %212, %211 + br i1 %213, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %212) + %215 = bitcast i8* %214 to { %Array*, %Array* }** + %216 = load { %Array*, %Array* }*, { %Array*, %Array* }** %215, align 8 + %217 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %216, i32 0, i32 0 + %218 = load %Array*, %Array** %217, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %218, i32 -1) + %219 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %216, i32 0, i32 1 + %220 = load %Array*, %Array** %219, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %220, i32 -1) + %221 = bitcast { %Array*, %Array* }* %216 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %221, i32 -1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %222 = add i64 %212, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_alias_count(%Array* %PQandPQQRData, i32 -1) + %223 = sub i64 %43, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %224 = phi i64 [ 0, %exit__15 ], [ %234, %exiting__16 ] + %225 = icmp sle i64 %224, %223 + br i1 %225, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %226 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %224) + %227 = bitcast i8* %226 to { %Array*, %Array* }** + %228 = load { %Array*, %Array* }*, { %Array*, %Array* }** %227, align 8 + %229 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %228, i32 0, i32 0 + %230 = load %Array*, %Array** %229, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %230, i32 -1) + %231 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %228, i32 0, i32 1 + %232 = load %Array*, %Array** %231, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %232, i32 -1) + %233 = bitcast { %Array*, %Array* }* %228 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %233, i32 -1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %234 = add i64 %224, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %109, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %109, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %110, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %115, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %115, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %116, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %121, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %121, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %122, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %127, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %127, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %128, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %105, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %109, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %109, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %110, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %111, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %115, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %115, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %116, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %117, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %121, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %121, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %122, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %123, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %127, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %127, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %128, i32 -1) + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %235 = phi i64 [ 0, %exit__16 ], [ %243, %exiting__17 ] + %236 = icmp sle i64 %235, 3 + br i1 %236, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %237 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 %235) + %238 = bitcast i8* %237 to { i64, %Callable* }** + %239 = load { i64, %Callable* }*, { i64, %Callable* }** %238, align 8 + %240 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %239, i32 0, i32 1 + %241 = load %Callable*, %Callable** %240, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %241, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %241, i32 -1) + %242 = bitcast { i64, %Callable* }* %239 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %242, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %243 = add i64 %235, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_reference_count(%Array* %129, i32 -1) + ret { i64, %Callable* }* %138 +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %data, %Array* %termType) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %data) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %2) + %5 = bitcast i8* %4 to { %Array*, %Array* }** + %6 = load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %2, 1 + br label 
%header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 1) + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__HTermsToGenIdx__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %14 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %25, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %15) + %18 = bitcast i8* %17 to { %Array*, %Array* }** + %19 = load { %Array*, %Array* }*, { %Array*, %Array* }** %18, align 8 + %20 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %19, i32 0, i32 0 + %21 = load %Array*, %Array** %20, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 1) + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %19, i32 0, i32 1 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 1) + %24 = bitcast { %Array*, %Array* }* %19 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %25 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %data, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %termType, i32 1) + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Array* }* getelementptr ({ %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Callable*, %Array*, %Array* }* + %28 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %27, i32 0, i32 1 + %30 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %27, i32 0, i32 2 + store %Callable* %13, %Callable** %28, align 8 + store %Array* %data, %Array** %29, align 8 + store %Array* %termType, %Array** %30, align 8 + %31 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__43__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__28__FunctionTable, %Tuple* %26) + %32 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 %0, %Callable* %31) + %33 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %34 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %35 = icmp sle i64 %34, %33 + br i1 %35, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %34) + %37 = bitcast i8* %36 to { %Array*, %Array* }** + %38 = load { %Array*, %Array* }*, { %Array*, %Array* }** %37, align 8 + %39 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %38, i32 0, i32 0 + %40 = load %Array*, %Array** %39, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %40, i32 -1) + %41 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %38, 
i32 0, i32 1 + %42 = load %Array*, %Array** %41, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 -1) + %43 = bitcast { %Array*, %Array* }* %38 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %34, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + ret { i64, %Callable* }* %32 +} + +define internal void @Microsoft__Quantum__Intrinsic__X__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSparseMultiConfigurationalState__body(%Callable* %initialStatePreparation, %Array* %excitations, %Array* %qubits) { +entry: + %success = alloca i1, align 1 + %applyFlips = alloca %Array*, align 8 + %coefficientsNewComplexPolar = alloca %Array*, align 8 + %coefficientsSqrtAbs = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %initialStatePreparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %initialStatePreparation, i32 1) + %nExcitations = call i64 @__quantum__rt__array_get_size_1d(%Array* %excitations) + %0 = sub i64 %nExcitations, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + 
+body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %excitations, i64 %1) + %4 = bitcast i8* %3 to { { double, double }*, %Array* }** + %5 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %4, align 8 + %6 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %5, i32 0, i32 0 + %7 = load { double, double }*, { double, double }** %6, align 8 + %8 = bitcast { double, double }* %7 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %5, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { { double, double }*, %Array* }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %excitations, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %13 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nExcitations) + %14 = sub i64 %nExcitations, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 %15) + %18 = bitcast i8* %17 to double* + store double 0.000000e+00, double* %18, align 8 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + store %Array* %13, %Array** %coefficientsSqrtAbs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %20 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double 0.000000e+00, double 0.000000e+00) + %21 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nExcitations) + %22 = sub i64 %nExcitations, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %23 = phi i64 [ 0, %exit__2 ], [ %28, %exiting__3 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %23) + %26 = bitcast i8* %25 to { double, double }** + store { double, double }* %20, { double, double }** %26, align 8 + %27 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %28 = add i64 %23, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %21, %Array** %coefficientsNewComplexPolar, align 8 + %29 = sub i64 %nExcitations, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %30 = phi i64 [ 0, %exit__3 ], [ %36, %exiting__4 ] + %31 = icmp sle i64 %30, %29 + br i1 %31, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %30) + %33 = bitcast i8* %32 to { double, double }** + %34 = load { double, double }*, { double, double }** %33, align 8 + %35 = bitcast { double, double }* %34 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %35, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %36 = add i64 %30, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 1) + %37 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %38 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nExcitations) + %39 = sub i64 %nExcitations, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %40 = phi i64 [ 0, %exit__4 ], [ %44, %exiting__5 ] + %41 = icmp sle i64 %40, %39 + br i1 %41, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 %40) + %43 = bitcast i8* %42 to %Array** + store %Array* %37, %Array** %43, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %44 = add i64 %40, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + store %Array* %38, %Array** %applyFlips, align 8 + %45 = sub i64 %nExcitations, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %46 = phi i64 [ 0, %exit__5 ], [ %51, %exiting__6 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 %46) + %49 = bitcast i8* %48 to %Array** + %50 = load %Array*, %Array** %49, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %51 = add i64 %46, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %38, i32 1) + %52 = sub i64 %nExcitations, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %idx = phi i64 [ 0, %exit__6 ], [ %94, %exiting__7 ] + %53 = icmp sle i64 %idx, %52 + br i1 %53, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %excitations, i64 %idx) + %55 = bitcast i8* %54 to { { double, double }*, %Array* }** + %56 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %55, align 8 + %57 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %56, i32 0, i32 0 + %x = load { double, double }*, { double, double }** %57, align 8 + %58 = bitcast { double, double }* %x to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %58, i32 1) + %59 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %56, i32 0, i32 1 + %excitation = load %Array*, %Array** %59, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %excitation, i32 1) + %60 = load %Array*, %Array** %coefficientsSqrtAbs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %60, i32 -1) + %61 = call %Array* @__quantum__rt__array_copy(%Array* %60, i1 false) + %62 = getelementptr inbounds { double, double }, { double, double }* %x, i32 0, i32 0 + %63 = getelementptr inbounds { double, double }, { double, double }* %x, i32 0, i32 1 + %64 = load double, double* %62, align 8 + %65 = load double, double* %63, align 8 + %66 = call { double, double }* @Microsoft__Quantum__Math__Complex__body(double %64, double %65) + %67 = call { double, double }* @Microsoft__Quantum__Math__ComplexAsComplexPolar__body({ double, double }* %66) + %d = 
call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %67) + %68 = call double @__quantum__qis__sqrt__body(double %d) + %69 = bitcast { double, double }* %66 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %69, i32 -1) + %70 = bitcast { double, double }* %67 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %70, i32 -1) + %71 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 %idx) + %72 = bitcast i8* %71 to double* + store double %68, double* %72, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %61, i32 1) + store %Array* %61, %Array** %coefficientsSqrtAbs, align 8 + %73 = load %Array*, %Array** %coefficientsNewComplexPolar, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %73, i32 -1) + %74 = call %Array* @__quantum__rt__array_copy(%Array* %73, i1 false) + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 %idx) + %76 = bitcast i8* %75 to double* + %77 = load double, double* %76, align 8 + %78 = call { double, double }* @Microsoft__Quantum__Math__Complex__body(double %64, double %65) + %79 = call { double, double }* @Microsoft__Quantum__Math__ComplexAsComplexPolar__body({ double, double }* %78) + %80 = call double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* %79) + %81 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %77, double %80) + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 %idx) + %83 = bitcast i8* %82 to { double, double }** + %84 = bitcast { double, double }* %81 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %84, i32 1) + %85 = load { double, double }*, { double, double }** %83, align 8 + %86 = bitcast { double, double }* %85 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %86, i32 -1) + store { double, double }* %81, { double, double }** %83, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %74, i32 1) + store %Array* %74, %Array** %coefficientsNewComplexPolar, align 8 + %87 = load %Array*, %Array** %applyFlips, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %87, i32 -1) + %88 = call %Array* @__quantum__rt__array_copy(%Array* %87, i1 false) + %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 %idx) + %90 = bitcast i8* %89 to %Array** + call void @__quantum__rt__array_update_alias_count(%Array* %excitation, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %excitation, i32 1) + %91 = load %Array*, %Array** %90, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %91, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %91, i32 -1) + store %Array* %excitation, %Array** %90, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %88, i32 1) + store %Array* %88, %Array** %applyFlips, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %58, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %excitation, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %73, i32 -1) + %92 = bitcast { double, double }* %78 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %92, i32 -1) + %93 = bitcast { double, double }* %79 to %Tuple* + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %93, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %87, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %94 = add i64 %idx, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + %95 = sitofp i64 %nExcitations to double + %96 = call double @Microsoft__Quantum__Math__Lg__body(double %95) + %nBitsIndices = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %96) + br label %repeat__1 + +repeat__1: ; preds = %fixup__1, %exit__7 + store i1 false, i1* %success, align 1 + %97 = add i64 %nBitsIndices, 1 + %auxillary = call %Array* @__quantum__rt__qubit_allocate_array(i64 %97) + call void @__quantum__rt__array_update_alias_count(%Array* %auxillary, i32 1) + %flag = call %Qubit* @__quantum__rt__qubit_allocate() + %98 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___PrepareSingleConfigurationalStateSingleSiteOccupation____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %99 = load %Array*, %Array** %applyFlips, align 8 + %100 = call %Array* @Microsoft__Quantum__Arrays___ac214dcd588b470fb29f1cc67e145065_Mapped__body(%Callable* %98, %Array* %99) + %101 = call %Callable* @Microsoft__Quantum__Arrays___fc3dc354bc024fd5b7f38df86565fb27_LookupFunction__body(%Array* %100) + %multiplexer = call %Callable* @Microsoft__Quantum__Canon__MultiplexerBruteForceFromGenerator__body(i64 %nExcitations, %Callable* %101) + call void @__quantum__rt__capture_update_alias_count(%Callable* %multiplexer, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %multiplexer, i32 1) + %102 = load %Array*, %Array** %coefficientsNewComplexPolar, align 8 + %103 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %auxillary) + call void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__body(%Array* %102, { %Array* }* %103) + %104 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %auxillary) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %105 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %106 = bitcast %Tuple* %105 to { { %Array* }*, %Array* }* + %107 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %106, i32 0, i32 0 + %108 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %106, i32 0, i32 1 + store { %Array* }* %104, { %Array* }** %107, align 8 + store %Array* %qubits, %Array** %108, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %multiplexer, %Tuple* %105, %Tuple* null) + %109 = load %Array*, %Array** %coefficientsSqrtAbs, align 8 + %110 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %auxillary) + call void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__adj(%Array* %109, { %Array* }* %110) + %111 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %112 = call %Callable* @Microsoft__Quantum__Canon___79e0da793bac4e01ba7a8549000baf29_ControlledOnInt__body(i64 0, %Callable* %111) + call void @__quantum__rt__array_update_reference_count(%Array* %auxillary, i32 1) + %113 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %114 = bitcast %Tuple* %113 to { %Array*, %Qubit* }* + %115 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %114, i32 0, i32 0 + %116 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %114, i32 0, i32 1 + store %Array* %auxillary, %Array** %115, align 8 + store %Qubit* %flag, %Qubit** %116, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %112, %Tuple* %113, %Tuple* null) + %outcome = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %flag) + %117 = call %Result* @__quantum__rt__result_get_one() + %118 = call i1 @__quantum__rt__result_equal(%Result* %outcome, %Result* %117) + store i1 %118, i1* %success, align 1 + call void @Microsoft__Quantum__Intrinsic__ResetAll__body(%Array* %auxillary) + call void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %flag) + %119 = getelementptr inbounds { %Array* }, { %Array* }* %103, i32 0, i32 0 + %120 = load %Array*, %Array** %119, align 8 + %121 = getelementptr inbounds { %Array* }, { %Array* }* %104, i32 0, i32 0 + %122 = load %Array*, %Array** %121, align 8 + %123 = getelementptr inbounds { %Array* }, { %Array* }* %110, i32 0, i32 0 + %124 = load %Array*, %Array** %123, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %multiplexer, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %multiplexer, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %98, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %98, i32 -1) + %125 = call i64 @__quantum__rt__array_get_size_1d(%Array* %100) + %126 = sub i64 %125, 1 + br label %header__8 + +until__1: ; preds = %exit__8 + br i1 %118, label %rend__1, label %fixup__1 + +fixup__1: ; preds = %until__1 + call void @Microsoft__Quantum__Intrinsic__ResetAll__body(%Array* %qubits) + br label %repeat__1 + +rend__1: ; preds = %until__1 + %127 = load %Array*, %Array** %coefficientsSqrtAbs, align 8 + %128 = load %Array*, %Array** %coefficientsNewComplexPolar, align 8 + %129 = load %Array*, %Array** %applyFlips, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %initialStatePreparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %initialStatePreparation, i32 -1) + %130 = sub i64 %nExcitations, 1 + br label %header__9 + +header__8: ; preds = %exiting__8, %repeat__1 + %131 = phi i64 [ 0, %repeat__1 ], [ %136, %exiting__8 ] + %132 = icmp sle i64 %131, %126 + br i1 %132, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %100, i64 %131) + %134 = bitcast i8* %133 to %Callable** + %135 = load %Callable*, %Callable** %134, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %135, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %135, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %136 = add i64 %131, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %100, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %101, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %101, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %multiplexer, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %multiplexer, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + %137 = bitcast { %Array* }* %103 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %137, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %122, i32 -1) + %138 = bitcast { %Array* }* %104 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %138, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %105, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %124, i32 -1) + %139 = bitcast { %Array* }* %110 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %139, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %111, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %111, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %112, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %112, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxillary, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %113, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %outcome, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %flag) + call void @__quantum__rt__array_update_alias_count(%Array* %auxillary, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %auxillary) + br label %until__1 + +header__9: ; preds = %exiting__9, %rend__1 + %140 = phi i64 [ 0, %rend__1 ], [ %151, %exiting__9 ] + %141 = icmp sle i64 %140, %130 + br i1 %141, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %excitations, i64 %140) + %143 = bitcast i8* %142 to { { double, double }*, %Array* }** + %144 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %143, align 8 + %145 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %144, i32 0, i32 0 + %146 = load { double, double }*, { double, double }** %145, align 8 + %147 = bitcast { double, double }* %146 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %147, i32 -1) + %148 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %144, i32 0, i32 1 + %149 = load %Array*, %Array** %148, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %149, i32 -1) + %150 = bitcast { { double, double }*, %Array* }* %144 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %150, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %151 = add i64 %140, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %excitations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %127, i32 -1) + %152 = call i64 @__quantum__rt__array_get_size_1d(%Array* %128) + %153 = sub i64 %152, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %154 = phi i64 [ 0, %exit__9 ], [ %160, %exiting__10 ] + %155 = icmp sle i64 %154, %153 + br i1 %155, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %156 = call 
i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %128, i64 %154) + %157 = bitcast i8* %156 to { double, double }** + %158 = load { double, double }*, { double, double }** %157, align 8 + %159 = bitcast { double, double }* %158 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %159, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %160 = add i64 %154, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %128, i32 -1) + %161 = call i64 @__quantum__rt__array_get_size_1d(%Array* %129) + %162 = sub i64 %161, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %163 = phi i64 [ 0, %exit__10 ], [ %168, %exiting__11 ] + %164 = icmp sle i64 %163, %162 + br i1 %164, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %165 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 %163) + %166 = bitcast i8* %165 to %Array** + %167 = load %Array*, %Array** %166, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %167, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %168 = add i64 %163, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %129, i32 -1) + %169 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %169, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %127, i32 -1) + %170 = sub i64 %152, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %171 = phi i64 [ 0, %exit__11 ], [ %177, %exiting__12 ] + %172 = icmp sle i64 %171, %170 + br i1 %172, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %173 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %128, i64 %171) + %174 = bitcast i8* %173 to { double, double }** + %175 = load { double, double }*, { double, double }** %174, align 8 + %176 = bitcast { double, double }* %175 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %176, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %177 = add i64 %171, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_reference_count(%Array* %128, i32 -1) + %178 = sub i64 %161, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %179 = phi i64 [ 0, %exit__12 ], [ %184, %exiting__13 ] + %180 = icmp sle i64 %179, %178 + br i1 %180, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %181 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 %179) + %182 = bitcast i8* %181 to %Array** + %183 = load %Array*, %Array** %182, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %183, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %184 = add i64 %179, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_reference_count(%Array* %129, i32 -1) + ret void +} + +declare void @__quantum__rt__qubit_release(%Qubit*) + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___PrepareSingleConfigurationalStateSingleSiteOccupation____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array* }* + %1 = getelementptr 
inbounds { %Array* }, { %Array* }* %0, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Callable* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___PrepareSingleConfigurationalStateSingleSiteOccupation____body(%Array* %2) + %4 = bitcast %Tuple* %result-tuple to { %Callable* }* + %5 = getelementptr inbounds { %Callable* }, { %Callable* }* %4, i32 0, i32 0 + store %Callable* %3, %Callable** %5, align 8 + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body({ i64, %Array* }* %stateData, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %stateData, i32 0, i32 1 + %terms = load %Array*, %Array** %0, align 8 + %nTerms = call i64 @__quantum__rt__array_get_size_1d(%Array* %terms) + %1 = sub i64 %nTerms, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %2) + %5 = bitcast i8* %4 to { { double, double }*, %Array* }** + %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0 + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %terms, i32 1) + %14 = bitcast { i64, %Array* }* %stateData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %15 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %stateData, i32 0, i32 0 + %stateType = load i64, i64* %15, align 4 + %16 = sub i64 %nTerms, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %17 = phi i64 [ 0, %exit__1 ], [ %28, %exiting__2 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %17) + %20 = bitcast i8* %19 to { { double, double }*, %Array* }** + %21 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %20, align 8 + %22 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %21, i32 0, i32 0 + %23 = load { double, double }*, { double, double }** %22, align 8 + %24 = bitcast { double, double }* %23 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 1) + %25 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %21, i32 0, i32 1 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %27 = 
bitcast { { double, double }*, %Array* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %28 = add i64 %17, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %terms, i32 1) + %29 = icmp eq i64 %stateType, 2 + br i1 %29, label %then0__1, label %test1__1 + +then0__1: ; preds = %exit__2 + %30 = call i1 @Microsoft__Quantum__Arrays___d03f28613a2a406a92da3539b001d776_IsEmpty__body(%Array* %terms) + br i1 %30, label %then0__2, label %test1__2 + +then0__2: ; preds = %then0__1 + br label %continue__2 + +test1__2: ; preds = %then0__1 + %31 = icmp eq i64 %nTerms, 1 + br i1 %31, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__2 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 0) + %33 = bitcast i8* %32 to { { double, double }*, %Array* }** + %34 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %33, align 8 + %35 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %34, i32 0, i32 0 + %coefficient = load { double, double }*, { double, double }** %35, align 8 + %36 = bitcast { double, double }* %coefficient to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %36, i32 1) + %37 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %34, i32 0, i32 1 + %qubitIndices = load %Array*, %Array** %37, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body(%Array* %qubitIndices, %Array* %qubits) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %36, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + br label %continue__2 + +else__1: ; preds = %test1__2 + %38 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSparseMultiConfigurationalState__body(%Callable* %38, %Array* %terms, %Array* %qubits) + call void @__quantum__rt__capture_update_reference_count(%Callable* %38, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %38, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %then1__1, %then0__2 + br label %continue__1 + +test1__1: ; preds = %exit__2 + %39 = icmp eq i64 %stateType, 3 + br i1 %39, label %then1__2, label %continue__1 + +then1__2: ; preds = %test1__1 + %40 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %41 = sub i64 %nTerms, 1 + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %41) + %43 = bitcast i8* %42 to { { double, double }*, %Array* }** + %44 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %43, align 8 + %45 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %44, i32 0, i32 0 + %46 = load { double, double }*, { double, double }** %45, align 8 + %47 = getelementptr inbounds { { double, double }*, %Array* }, { { double, 
double }*, %Array* }* %44, i32 0, i32 1 + %48 = load %Array*, %Array** %47, align 8 + %49 = bitcast { double, double }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %49, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %48, i32 1) + %50 = bitcast { { double, double }*, %Array* }* %44 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %50, i32 1) + %51 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 0) + %53 = bitcast i8* %52 to { { double, double }*, %Array* }** + store { { double, double }*, %Array* }* %44, { { double, double }*, %Array* }** %53, align 8 + %54 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Array* }* getelementptr ({ %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* null, i32 1) to i64)) + %55 = bitcast %Tuple* %54 to { %Callable*, i64, %Array* }* + %56 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %55, i32 0, i32 0 + %57 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %55, i32 0, i32 1 + %58 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %55, i32 0, i32 2 + store %Callable* %40, %Callable** %56, align 8 + store i64 2, i64* %57, align 4 + store %Array* %51, %Array** %58, align 8 + %referenceState = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__39__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__24__FunctionTable, %Tuple* %54) + call void @__quantum__rt__capture_update_alias_count(%Callable* %referenceState, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %referenceState, i32 1) + %59 = sub i64 %nTerms, 2 + %60 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %59, 2 + %61 = call %Array* @__quantum__rt__array_slice_1d(%Array* %terms, %Range %60, i1 true) + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareUnitaryCoupledClusterState__body(%Callable* %referenceState, %Array* %61, double 1.000000e+00, %Array* %qubits) + call void @__quantum__rt__capture_update_alias_count(%Callable* %referenceState, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %referenceState, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %referenceState, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %referenceState, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %61, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then1__2, %test1__1, %continue__2 + %62 = sub i64 %nTerms, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %continue__1 + %63 = phi i64 [ 0, %continue__1 ], [ %74, %exiting__3 ] + %64 = icmp sle i64 %63, %62 + br i1 %64, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %63) + %66 = bitcast i8* %65 to { { double, double }*, %Array* }** + %67 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %66, align 8 + %68 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %67, i32 0, i32 0 + %69 = load { double, double }*, { double, double }** %68, align 8 + %70 = bitcast { double, double }* %69 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %70, i32 -1) 
+ %71 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %67, i32 0, i32 1
+ %72 = load %Array*, %Array** %71, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 -1)
+ %73 = bitcast { { double, double }*, %Array* }* %67 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %73, i32 -1)
+ br label %exiting__3
+
+exiting__3: ; preds = %body__3
+ %74 = add i64 %63, 1
+ br label %header__3
+
+exit__3: ; preds = %header__3
+ call void @__quantum__rt__array_update_alias_count(%Array* %terms, i32 -1)
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1)
+ call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+ %75 = sub i64 %nTerms, 1
+ br label %header__4
+
+header__4: ; preds = %exiting__4, %exit__3
+ %76 = phi i64 [ 0, %exit__3 ], [ %87, %exiting__4 ]
+ %77 = icmp sle i64 %76, %75
+ br i1 %77, label %body__4, label %exit__4
+
+body__4: ; preds = %header__4
+ %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %76)
+ %79 = bitcast i8* %78 to { { double, double }*, %Array* }**
+ %80 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %79, align 8
+ %81 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %80, i32 0, i32 0
+ %82 = load { double, double }*, { double, double }** %81, align 8
+ %83 = bitcast { double, double }* %82 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %83, i32 -1)
+ %84 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %80, i32 0, i32 1
+ %85 = load %Array*, %Array** %84, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %85, i32 -1)
+ %86 = bitcast { { double, double }*, %Array* }* %80 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1)
+ br label %exiting__4
+
+exiting__4: ; preds = %body__4
+ %87 = add i64 %76, 1
+ br label %header__4
+
+exit__4: ; preds = %header__4
+ call void @__quantum__rt__array_update_alias_count(%Array* %terms, i32 -1)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array* }*
+ %1 = getelementptr inbounds { %Array* }, { %Array* }* %0, i32 0, i32 0
+ %2 = load %Array*, %Array** %1, align 8
+ call void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__body(%Array* %2)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array* }*
+ %1 = getelementptr inbounds { %Array* }, { %Array* }* %0, i32 0, i32 0
+ %2 = load %Array*, %Array** %1, align 8
+ call void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__adj(%Array* %2)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }*
+ %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1
+ %3 = load %Array*, %Array** %1, align 8
+ %4 = load %Array*, %Array** %2, align 8
+ call void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__ctl(%Array* %3, %Array* %4)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }*
+ %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1
+ %3 = load %Array*, %Array** %1, align 8
+ %4 = load %Array*, %Array** %2, align 8
+ call void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__ctladj(%Array* %3, %Array* %4)
+ ret void
+}
+
+define internal void @Lifted__PartialApplication__39__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Array* }*
+ %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 1
+ %2 = load i64, i64* %1, align 4
+ %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2
+ %4 = load %Array*, %Array** %3, align 8
+ %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64))
+ %6 = bitcast %Tuple* %5 to { i64, %Array* }*
+ %7 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %6, i32 0, i32 0
+ %8 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %6, i32 0, i32 1
+ store i64 %2, i64* %7, align 4
+ store %Array* %4, %Array** %8, align 8
+ %9 = bitcast %Tuple* %arg-tuple to { %Array* }*
+ %10 = getelementptr inbounds { %Array* }, { %Array* }* %9, i32 0, i32 0
+ %11 = load %Array*, %Array** %10, align 8
+ %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Array* }*, %Array* }* getelementptr ({ { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* null, i32 1) to i64))
+ %13 = bitcast %Tuple* %12 to { { i64, %Array* }*, %Array* }*
+ %14 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %13, i32 0, i32 0
+ %15 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %13, i32 0, i32 1
+ store { i64, %Array* }* %6, { i64, %Array* }** %14, align 8
+ store %Array* %11, %Array** %15, align 8
+ %16 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0
+ %17 = load %Callable*, %Callable** %16, align 8
+ call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %12, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { { i64, %Array* }*, %Array* }*
+ %1 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 1
+ %3 = load { i64, %Array* }*, { i64, %Array* }** %1, align 8
+ %4 = load %Array*, %Array** %2, align 8
+ call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body({ i64, %Array* }* %3, %Array* %4)
+ ret void
+}
+
+define internal void @MemoryManagement__24__RefCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Array* }*
+ %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0
+ %2 = load %Callable*, %Callable** %1, align 8
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change)
+ %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2
+ %4 = load %Array*, %Array** %3, align 8
+ %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4)
+ %6 = sub i64 %5, 1
+ br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+ %7 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ]
+ %8 = icmp sle i64 %7, %6
+ br i1 %8, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+ %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7)
+ %10 = bitcast i8* %9 to { { double, double }*, %Array* }**
+ %11 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %10, align 8
+ %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 0
+ %13 = load { double, double }*, { double, double }** %12, align 8
+ %14 = bitcast { double, double }* %13 to %Tuple*
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change)
+ %15 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 1
+ %16 = load %Array*, %Array** %15, align 8
+ call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 %count-change)
+ %17 = bitcast { { double, double }*, %Array* }* %11 to %Tuple*
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 %count-change)
+ br label %exiting__1
+
+exiting__1: ; preds = %body__1
+ %18 = add i64 %7, 1
+ br label %header__1
+
+exit__1: ; preds = %header__1
+ call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change)
+ ret void
+}
+
+define internal void @MemoryManagement__24__AliasCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Array* }*
+ %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0
+ %2 = load %Callable*, %Callable** %1, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change)
+ %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2
+ %4 = load %Array*, %Array** %3, align 8
+ %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4)
+ %6 = sub i64 %5, 1
+ br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+ %7 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ]
+ %8 = icmp sle i64 %7, %6
+ br i1 %8, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+ %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7)
+ %10 = bitcast i8* %9 to { { double, double }*, %Array* }**
+ %11 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %10, align 8
+ %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 0
+ %13 = load { double, double }*, { double, double }** %12, align 8
+ %14 = bitcast { double, double }* %13 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change)
+ %15 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 1
+ %16 = load %Array*, %Array** %15, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %16, i32 %count-change)
+ %17 = bitcast { { double, double }*, %Array* }* %11 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 %count-change)
+ br label %exiting__1
+
+exiting__1: ; preds = %body__1
+ %18 = add i64 %7, 1
+ br label %header__1
+
+exit__1: ; preds = %header__1
+ call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change)
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareUnitaryCoupledClusterState__body(%Callable* %initialStatePreparation, %Array* %clusterOperator, double %trotterStepSize, %Array* %qubits) {
+entry:
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %initialStatePreparation, i32 1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %initialStatePreparation, i32 1)
+ %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %clusterOperator)
+ %1 = sub i64 %0, 1
+ br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+ %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ]
+ %3 = icmp sle i64 %2, %1
+ br i1 %3, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+ %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %clusterOperator, i64 %2)
+ %5 = bitcast i8* %4 to { { double, double }*, %Array* }**
+ %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8
+ %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0
+ %8 = load { double, double }*, { double, double }** %7, align 8
+ %9 = bitcast { double, double }* %8 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1)
+ %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1
+ %11 = load %Array*, %Array** %10, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1)
+ %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1)
+ br label %exiting__1
+
+exiting__1: ; preds = %body__1
+ %13 = add i64 %2, 1
+ br label %header__1
+
+exit__1: ; preds = %header__1
+ call void @__quantum__rt__array_update_alias_count(%Array* %clusterOperator, i32 1)
+ call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+ %clusterOperatorGeneratorSystem = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerClusterOperatorGeneratorSystem__body(%Array* %clusterOperator)
+ %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %clusterOperatorGeneratorSystem, i32 0, i32 1
+ %15 = load %Callable*, %Callable** %14, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %15, i32 1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %15, i32 1)
+ %16 = bitcast { i64, %Callable* }* %clusterOperatorGeneratorSystem to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1)
+ %17 = call { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerClusterOperatorEvolutionSet__body()
+ %evolutionGenerator = call { { %Callable* }*, { i64, %Callable* }* }* @Microsoft__Quantum__Simulation__EvolutionGenerator__body({ %Callable* }* %17, { i64, %Callable* }* %clusterOperatorGeneratorSystem)
+ %18 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0
+ %19 = load { %Callable* }*, { %Callable* }** %18, align 8
+ %20 = getelementptr inbounds { %Callable* }, { %Callable* }* %19, i32 0, i32 0
+ %21 = load %Callable*, %Callable** %20, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %21, i32 1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %21, i32 1)
+ %22 = bitcast { %Callable* }* %19 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 1)
+ %23 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1
+ %24 = load { i64, %Callable* }*, { i64, %Callable* }** %23, align 8
+ %25 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 1
+ %26 = load %Callable*, %Callable** %25, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 1)
+ %27 = bitcast { i64, %Callable* }* %24 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 1)
+ %28 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1)
+ %29 = call { %Callable* }* @Microsoft__Quantum__Simulation__TrotterSimulationAlgorithm__body(double %trotterStepSize, i64 1)
+ %30 = getelementptr inbounds { %Callable* }, { %Callable* }* %29, i32 0, i32 0
+ %simulationAlgorithm = load %Callable*, %Callable** %30, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %simulationAlgorithm, i32 1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %simulationAlgorithm, i32 1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %simulationAlgorithm, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %simulationAlgorithm, i32 1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 1)
+ %31 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* getelementptr ({ %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* null, i32 1) to i64))
+ %32 = bitcast %Tuple* %31 to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }*
+ %33 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %32, i32 0, i32 0
+ %34 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %32, i32 0, i32 1
+ %35 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %32, i32 0, i32 2
+ store %Callable* %simulationAlgorithm, %Callable** %33, align 8
+ store double 1.000000e+00, double* %34, align 8
+ store { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, { { %Callable* }*, { i64, %Callable* }* }** %35, align 8
+ %oracle = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__40__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__25__FunctionTable, %Tuple* %31)
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1)
+ %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64))
+ %37 = bitcast %Tuple* %36 to { %Array* }*
+ %38 = getelementptr inbounds { %Array* }, { %Array* }* %37, i32 0, i32 0
+ store %Array* %qubits, %Array** %38, align 8
+ call void @__quantum__rt__callable_invoke(%Callable* %initialStatePreparation, %Tuple* %36, %Tuple* null)
+ %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64))
+ %40 = bitcast %Tuple* %39 to { %Array* }*
+ %41 = getelementptr inbounds { %Array* }, { %Array* }* %40, i32 0, i32 0
+ store %Array* %qubits, %Array** %41, align 8
+ call void @__quantum__rt__callable_invoke(%Callable* %oracle, %Tuple* %39, %Tuple* null)
+ %42 = getelementptr inbounds { %Callable* }, { %Callable* }* %17, i32 0, i32 0
+ %43 = load %Callable*, %Callable** %42, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %initialStatePreparation, i32 -1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %initialStatePreparation, i32 -1)
+ %44 = sub i64 %0, 1
+ br label %header__2
+
+header__2: ; preds = %exiting__2, %exit__1
+ %45 = phi i64 [ 0, %exit__1 ], [ %56, %exiting__2 ]
+ %46 = icmp sle i64 %45, %44
+ br i1 %46, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+ %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %clusterOperator, i64 %45)
+ %48 = bitcast i8* %47 to { { double, double }*, %Array* }**
+ %49 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %48, align 8
+ %50 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %49, i32 0, i32 0
+ %51 = load { double, double }*, { double, double }** %50, align 8
+ %52 = bitcast { double, double }* %51 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %52, i32 -1)
+ %53 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %49, i32 0, i32 1
+ %54 = load %Array*, %Array** %53, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %54, i32 -1)
+ %55 = bitcast { { double, double }*, %Array* }* %49 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %55, i32 -1)
+ br label %exiting__2
+
+exiting__2: ; preds = %body__2
+ %56 = add i64 %45, 1
+ br label %header__2
+
+exit__2: ; preds = %header__2
+ call void @__quantum__rt__array_update_alias_count(%Array* %clusterOperator, i32 -1)
+ call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %15, i32 -1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %15, i32 -1)
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 -1)
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %21, i32 -1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %21, i32 -1)
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 -1)
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 -1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 -1)
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 -1)
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1)
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %simulationAlgorithm, i32 -1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %simulationAlgorithm, i32 -1)
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %43, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %43, i32 -1)
+ %57 = bitcast { %Callable* }* %17 to %Tuple*
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %57, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %simulationAlgorithm, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %simulationAlgorithm, i32 -1)
+ %58 = bitcast { %Callable* }* %29 to %Tuple*
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %58, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %39, i32 -1)
+ ret void
+}
+
+define internal void @Lifted__PartialApplication__40__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }*
+ %1 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1
+ %2 = load double, double* %1, align 8
+ %3 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 2
+ %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8
+ %5 = bitcast %Tuple* %arg-tuple to { %Array* }*
+ %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0
+ %7 = load %Array*, %Array** %6, align 8
+ %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64))
+ %9 = bitcast %Tuple* %8 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*
+ %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 0
+ %11 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 1
+ %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 2
+ store double %2, double* %10, align 8
+ store { { %Callable* }*, { i64, %Callable* }* }* %4, { { %Callable* }*, { i64, %Callable* }* }** %11, align 8
+ store %Array* %7, %Array** %12, align 8
+ %13 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0
+ %14 = load %Callable*, %Callable** %13, align 8
+ call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1)
+ ret void
+}
+
+define internal void @Lifted__PartialApplication__40__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }*
+ %1 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1
+ %2 = load double, double* %1, align 8
+ %3 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 2
+ %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8
+ %5 = bitcast %Tuple* %arg-tuple to { %Array* }*
+ %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0
+ %7 = load %Array*, %Array** %6, align 8
+ %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64))
+ %9 = bitcast %Tuple* %8 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*
+ %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 0
+ %11 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 1
+ %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 2
+ store double %2, double* %10, align 8
+ store { { %Callable* }*, { i64, %Callable* }* }* %4, { { %Callable* }*, { i64, %Callable* }* }** %11, align 8
+ store %Array* %7, %Array** %12, align 8
+ %13 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0
+ %14 = load %Callable*, %Callable** %13, align 8
+ %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1)
+ call void @__quantum__rt__callable_make_adjoint(%Callable* %15)
+ call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1)
+ ret void
+}
+
+define internal void @Lifted__PartialApplication__40__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }*
+ %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1
+ %3 = load %Array*, %Array** %1, align 8
+ %4 = load %Array*, %Array** %2, align 8
+ %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }*
+ %6 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1
+ %7 = load double, double* %6, align 8
+ %8 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 2
+ %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %8, align 8
+ %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64))
+ %11 = bitcast %Tuple* %10 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*
+ %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 0
+ %13 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 1
+ %14 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 2
+ store double %7, double* %12, align 8
+ store { { %Callable* }*, { i64, %Callable* }* }* %9, { { %Callable* }*, { i64, %Callable* }* }** %13, align 8
+ store %Array* %4, %Array** %14, align 8
+ %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* getelementptr ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* null, i32 1) to i64))
+ %16 = bitcast %Tuple* %15 to { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }*
+ %17 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 0
+ %18 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 1
+ store %Array* %3, %Array** %17, align 8
+ store { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %18, align 8
+ %19 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0
+ %20 = load %Callable*, %Callable** %19, align 8
+ %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1)
+ call void @__quantum__rt__callable_make_controlled(%Callable* %21)
+ call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1)
+ ret void
+}
+
+define internal void @Lifted__PartialApplication__40__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }*
+ %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1
+ %3 = load %Array*, %Array** %1, align 8
+ %4 = load %Array*, %Array** %2, align 8
+ %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }*
+ %6 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1
+ %7 = load double, double* %6, align 8
+ %8 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 2
+ %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %8, align 8
+ %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64))
+ %11 = bitcast %Tuple* %10 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*
+ %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 0
+ %13 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 1
+ %14 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 2
+ store double %7, double* %12, align 8
+ store { { %Callable* }*, { i64, %Callable* }* }* %9, { { %Callable* }*, { i64, %Callable* }* }** %13, align 8
+ store %Array* %4, %Array** %14, align 8
+ %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* getelementptr ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* null, i32 1) to i64))
+ %16 = bitcast %Tuple* %15 to { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }*
+ %17 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 0
+ %18 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 1
+ store %Array* %3, %Array** %17, align 8
+ store { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %18, align 8
+ %19 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0
+ %20 = load %Callable*, %Callable** %19, align 8
+ %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1)
+ call void @__quantum__rt__callable_make_adjoint(%Callable* %21)
+ call void @__quantum__rt__callable_make_controlled(%Callable* %21)
+ call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1)
+ ret void
+}
+
+define internal void @MemoryManagement__25__RefCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }*
+ %1 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0
+ %2 = load %Callable*, %Callable** %1, align 8
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change)
+ %3 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 2
+ %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8
+ %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 0
+ %6 = load { %Callable* }*, { %Callable* }** %5, align 8
+ %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0
+ %8 = load %Callable*, %Callable** %7, align 8
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 %count-change)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 %count-change)
+ %9 = bitcast { %Callable* }* %6 to %Tuple*
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change)
+ %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 1
+ %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8
+ %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1
+ %13 = load %Callable*, %Callable** %12, align 8
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 %count-change)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 %count-change)
+ %14 = bitcast { i64, %Callable* }* %11 to %Tuple*
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change)
+ %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %4 to %Tuple*
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 %count-change)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change)
+ ret void
+}
+
+define internal void @MemoryManagement__25__AliasCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }*
+ %1 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0
+ %2 = load %Callable*, %Callable** %1, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change)
+ %3 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 2
+ %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8
+ %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 0
+ %6 = load { %Callable* }*, { %Callable* }** %5, align 8
+ %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0
+ %8 = load %Callable*, %Callable** %7, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 %count-change)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 %count-change)
+ %9 = bitcast { %Callable* }* %6 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change)
+ %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 1
+ %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8
+ %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1
+ %13 = load %Callable*, %Callable** %12, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 %count-change)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 %count-change)
+ %14 = bitcast { i64, %Callable* }* %11 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change)
+ %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %4 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 %count-change)
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____body({ i64, %Array* }* %inputState, %Array* %qubits) {
+entry:
+ %0 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputState, i32 0, i32 1
+ %1 = load %Array*, %Array** %0, align 8
+ %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1)
+ %3 = sub i64 %2, 1
+ br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+ %4 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ]
+ %5 = icmp sle i64 %4, %3
+ br i1 %5, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+ %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4)
+ %7 = bitcast i8* %6 to { { double, double }*, %Array* }**
+ %8 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %7, align 8
+ %9 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %8, i32 0, i32 0
+ %10 = load { double, double }*, { double, double }** %9, align 8
+ %11 = bitcast { double, double }* %10 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1)
+ %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %8, i32 0, i32 1
+ %13 = load %Array*, %Array** %12, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1)
+ %14 = bitcast { { double, double }*, %Array* }* %8 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1)
+ br label %exiting__1
+
+exiting__1: ; preds = %body__1
+ %15 = add i64 %4, 1
+ br label %header__1
+
+exit__1: ; preds = %header__1
+ call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1)
+ %16 = bitcast { i64, %Array* }* %inputState to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1)
+ call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+ call void @Microsoft__Quantum__Diagnostics__AssertAllZero__body(%Array* %qubits)
+ call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body({ i64, %Array* }* %inputState, %Array* %qubits)
+ %17 = sub i64 %2, 1
+ br label %header__2
+
+header__2: ; preds = %exiting__2, %exit__1
+ %18 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ]
+ %19 = icmp sle i64 %18, %17
+ br i1 %19, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+ %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %18)
+ %21 = bitcast i8* %20 to { { double, double }*, %Array* }**
+ %22 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %21, align 8
+ %23 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %22, i32 0, i32 0
+ %24 = load { double, double }*, { double, double }** %23, align 8
+ %25 = bitcast { double, double }* %24 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1)
+ %26 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %22, i32 0, i32 1
+ %27 = load %Array*, %Array** %26, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1)
+ %28 = bitcast { { double, double }*, %Array* }* %22 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1)
+ br label %exiting__2
+
+exiting__2: ; preds = %body__2
+ %29 = add i64 %18, 1
+ br label %header__2
+
+exit__2: ; preds = %header__2
+ call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1)
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 -1)
+ call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____adj({ i64, %Array* }* %inputState, %Array* %qubits) {
+entry:
+ %0 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputState, i32 0, i32 1
+ %1 = load %Array*, %Array** %0, align 8
+ %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1)
+ %3 = sub i64 %2, 1
+ br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+ %4 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ]
+ %5 = icmp sle i64 %4, %3
+ br i1 %5, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+ %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4)
+ %7 = bitcast i8* %6 to { { double, double }*, %Array* }**
+ %8 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %7, align 8
+ %9 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %8, i32 0, i32 0
+ %10 = load { double, double }*, { double, double }** %9, align 8
+ %11 = bitcast { double, double }* %10 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1)
+ %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %8, i32 0, i32 1
+ %13 = load %Array*, %Array** %12, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1)
+ %14 = bitcast { { double, double }*, %Array* }* %8 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1)
+ br label %exiting__1
+
+exiting__1: ; preds = %body__1
+ %15 = add i64 %4, 1
+ br label %header__1
+
+exit__1: ; preds = %header__1
+ call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1)
+ %16 = bitcast { i64, %Array* }* %inputState to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1)
+ call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+ call void @Microsoft__Quantum__Intrinsic__ResetAll__body(%Array* %qubits)
+ %17 = sub i64 %2, 1
+ br label %header__2
+
+header__2: ; preds = %exiting__2, %exit__1
+ %18 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ]
+ %19 = icmp sle i64 %18, %17
+ br i1 %19, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+ %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %18)
+ %21 = bitcast i8* %20 to { { double, double }*, %Array* }**
+ %22 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %21, align 8
+ %23 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %22, i32 0, i32 0
+ %24 = load { double, double }*, { double, double }** %23, align 8
+ %25 = bitcast { double, double }* %24 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1)
+ %26 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %22, i32 0, i32 1
+ %27 = load %Array*, %Array** %26, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1)
+ %28 = bitcast { { double, double }*, %Array* }* %22 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1)
+ br label %exiting__2
+
+exiting__2: ; preds = %body__2
+ %29 = add i64 %18, 1
+ br label %header__2
+
+exit__2: ; preds = %header__2
+ call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1)
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 -1)
+ call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+ ret void
+}
+
+define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner__VQE__MeasurementOperators__body(i64 %nQubits, %Array* %indices, i64 %termType) {
+entry:
+ %op__2 = alloca %Array*, align 8
+ %compactOp__1 = alloca %Array*, align 8
+ %op__1 = alloca %Array*, align 8
+ %compactOp = alloca %Array*, align 8
+ %op = alloca %Array*, align 8
+ %ops = alloca %Array*, align 8
+ %nOps = alloca i64, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 1)
+ store i64 0, i64* %nOps, align 4
+ %0 = icmp eq i64 %termType, 2
+ br i1 %0, label %then0__1, label %test1__1
+
+then0__1: ; preds = %entry
+ store i64 2, i64* %nOps, align 4
+ br label %continue__1
+
+test1__1: ; preds = %entry
+ %1 = icmp eq i64 %termType, 3
+ br i1 %1, label %then1__1, label %else__1
+
+then1__1: ; preds = %test1__1
+ store i64 8, i64* %nOps, align 4
+ br label %continue__1
+
+else__1: ; preds = %test1__1
+ store i64 1, i64* %nOps, align 4
+ br label %continue__1
+
+continue__1: ; preds = %else__1, %then1__1, %then0__1
+ %2 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 0)
+ %3 = load i64, i64* %nOps, align 4
+ %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %3)
+ %5 = sub i64 %3, 1
+ br label %header__1
+
+header__1: ; preds = %exiting__1, %continue__1
+ %6 = phi i64 [ 0, %continue__1 ], [ %10, %exiting__1 ]
+ %7 = icmp sle i64 %6, %5
+ br i1 %7, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+ %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %6)
+ %9 = bitcast i8* %8 to %Array**
+ store %Array* %2, %Array** %9, align 8
+ call void @__quantum__rt__array_update_reference_count(%Array* %2, i32 1)
+ br label %exiting__1
+
+exiting__1: ; preds = %body__1
+ %10 = add i64 %6, 1
+ br label %header__1
+
+exit__1: ; preds = %header__1
+ store %Array* %4, %Array** %ops, align 8
+ %11 = sub i64 %3, 1
+ br label %header__2
+
+header__2: ; preds = %exiting__2, %exit__1
+ %12 = phi i64 [ 0, %exit__1 ], [ %17, %exiting__2 ]
+ %13 = icmp sle i64 %12, %11
+ br i1 %13, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+ %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %12)
+ %15 = bitcast i8* %14 to %Array**
+ %16 = load %Array*, %Array** %15, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %16, i32 1)
+ br label %exiting__2
+
+exiting__2: ; preds = %body__2
+ %17 = add i64 %12, 1
+ br label %header__2
+
+exit__2: ; preds = %header__2
+ call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1)
+ %18 = icmp eq i64 %termType, 0
+ br i1 %18, label %condContinue__1, label %condFalse__1
+
+condFalse__1: ; preds = %exit__2
+ %19 = icmp eq i64 %termType, 1
+ br label %condContinue__1
+
+condContinue__1: ; preds = %condFalse__1, %exit__2
+ %20 = phi i1 [ %18, %exit__2 ], [ %19, %condFalse__1 ]
+ br i1 %20, label %then0__2, label %test1__2
+
+then0__2: ; preds = %condContinue__1
+ %21 = call %Array* @Microsoft__Quantum__Arrays___8023f18e08eb4c09a8a8acf673dba09b_ConstantArray__body(i64 %nQubits, i2 0)
+ store %Array* %21, %Array** %op, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 1)
+ %22 = call i64 @__quantum__rt__array_get_size_1d(%Array* %indices)
+ %23 = sub i64 %22, 1
+ br label %header__3
+
+test1__2: ; preds = %condContinue__1
+ %24 = icmp eq i64 %termType, 3
+ br i1 %24, label %then1__2, label %test2__1
+
+then1__2: ; preds = %test1__2
+ %25 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4)
+ %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 0)
+ %27 = bitcast i8* %26 to i2*
+ %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 1)
+ %29 = bitcast i8* %28 to i2*
+ %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 2)
+ %31 = bitcast i8* %30 to i2*
+ %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 3)
+ %33 = bitcast i8* %32 to i2*
+ store i2 1, i2* %27, align 1
+ store i2 1, i2* %29, align 1
+ store i2 1, i2* %31, align 1
+ store i2 1, i2* %33, align 1
+ %34 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4)
+ %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 0)
+ %36 = bitcast i8* %35 to i2*
+ %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 1)
+ %38 = bitcast i8* %37 to i2*
+ %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 2)
+ %40 = bitcast i8* %39 to i2*
+ %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 3)
+ %42 = bitcast i8* %41 to i2*
+ store i2 -1, i2* %36, align 1
+ store i2 -1, i2* %38, align 1
+ store i2 -1, i2* %40, align 1
+ store i2 -1, i2* %42, align 1
+ %43 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4)
+ %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 0)
+ %45 = bitcast i8* %44 to i2*
+ %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 1)
+ %47 = bitcast i8* %46 to i2*
+ %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 2)
+ %49 = bitcast i8* %48 to i2*
+ %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 3)
+ %51 = bitcast i8* %50 to i2*
+ store i2 1, i2* %45, align 1
+ store i2 1, i2* %47, align 1
+ store i2 -1, i2* %49, align 1
+ store i2 -1, i2* %51, align 1
+ %52 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4)
+ %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 0)
+ %54 = bitcast i8* %53 to i2*
+ %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 1)
+ %56 = bitcast i8* %55 to i2*
+ %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 2)
+ %58 = bitcast i8* %57 to i2*
+ %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 3)
+ %60 = bitcast i8* %59 to i2*
+ store i2 -1, i2* %54, align 1
+ store i2 -1, i2* %56, align 1
+ store i2 1, i2* %58, align 1
+ store i2 1, i2* %60, align 1
+ %61 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4)
+ %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 0)
+ %63 = bitcast i8* %62 to i2*
+ %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 1)
+ %65 = bitcast i8* %64 to i2*
+ %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 2)
+ %67 = bitcast i8* %66 to i2*
+ %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 3)
+ %69 = bitcast i8* %68 to i2*
+ store i2 1, i2* %63, align 1
+ store i2 -1, i2* %65, align 1
+ store i2 1, i2* %67, align 1
+ store i2 -1, i2* %69, align 1
+ %70 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4)
+ %71 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %70, i64 0)
+ %72 = bitcast i8* %71 to i2*
+ %73 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %70, i64 1)
+ %74 = bitcast i8* %73 to i2*
+ %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %70, i64 2)
+ %76 = bitcast i8* %75 to i2*
+ %77 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %70, i64 3)
+ %78 = bitcast i8* %77 to i2*
+ store i2 -1, i2* %72, align 1
+ store i2 1, i2* %74, align 1
+ store i2 -1, i2* %76, align 1
+ store i2 1, i2* %78, align 1
+ %79 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4)
+ %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %79, i64 0)
+ %81 = bitcast i8* %80 to i2*
+ %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %79, i64 1)
+ %83 = bitcast i8* %82 to i2*
+ %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %79, i64 2)
+ %85 = bitcast i8* %84 to i2*
+ %86 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %79, i64 3)
+ %87 = bitcast i8* %86 to i2*
+ store i2 -1, i2* %81, align 1
+ store i2 1, i2* %83, align 1
+ store i2 1, i2* %85, align 1
+ store i2 -1, i2* %87, align 1
+ %88 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4)
+ %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 0)
+ %90 = bitcast i8* %89 to i2*
+ %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 1)
+ %92 = bitcast i8* %91 to i2*
+ %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 2)
+ %94 = bitcast i8* %93 to i2*
+ %95 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 3)
+ %96 = bitcast i8* %95 to i2*
+ store i2 1, i2* %90, align 1
+ store i2 -1, i2* %92, align 1
+ store i2 -1, i2* %94, align 1
+ store i2 1, i2* %96, align 1
+ %compactOps = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8)
+ %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 0)
+ %98 = bitcast i8* %97 to %Array**
+ %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 1)
+ %100 = bitcast i8* %99 to %Array**
+ %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 2)
+ %102 = bitcast i8* %101 to %Array**
+ %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 3)
+ %104 = bitcast i8* %103 to %Array**
+ %105 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 4)
+ %106 = bitcast i8* %105 to %Array**
+ %107 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 5)
+ %108 = bitcast i8* %107 to %Array**
+ %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 6)
+ %110 = bitcast i8* %109 to %Array**
+ %111 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 7)
+ %112 = bitcast i8* %111 to %Array**
+ store %Array* %25, %Array** %98, align 8
+ store %Array* %34, %Array** %100, align 8
+ store %Array* %43, %Array** %102, align 8
+ store %Array* %52, %Array** %104, align 8
+ store %Array* %61, %Array** %106, align 8
+ store %Array* %70, %Array** %108, align 8
+ store %Array* %79, %Array** %110, align 8
+ store %Array* %88, %Array** %112, align 8
+ br label %header__4
+
+test2__1: ; preds = %test1__2
+ %113 = icmp eq i64 %termType, 2
+ br i1 %113, label %then2__1, label %continue__2
+
+then2__1: ; preds = %test2__1
+ %114 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2)
+ %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 0)
+ %116 = bitcast i8* %115 to i2*
+ %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 1)
+ %118 = bitcast i8* %117 to i2*
+ store i2 1, i2* %116, align 1
+ store i2 1, i2* %118, align 1
+ %119 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2)
+ %120 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %119, i64 0)
+ %121 = bitcast i8* %120 to i2*
+ %122 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %119, i64 1)
+ %123 = bitcast i8* %122 to i2*
+ store i2 -1, i2* %121, align 1
+ store i2 -1, i2* %123, align 1
+ %compactOps__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2)
+ %124 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps__1, i64 0)
+ %125 = bitcast i8* %124 to %Array**
+ %126 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps__1, i64 1)
+ %127 = bitcast i8* %126 to %Array**
+ store %Array* %114, %Array** %125, align 8
+ store %Array* %119, %Array** %127, align 8
+ br label %header__12
+
+continue__2: ; preds = %exit__16, %test2__1, %exit__11, %exit__3
+ %128 = load %Array*, %Array** %ops, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1)
+ %129 = call i64 @__quantum__rt__array_get_size_1d(%Array* %128)
+ %130 = sub i64 %129, 1
+ br label %header__17
+
+header__3: ; preds = %exiting__3, %then0__2
+ %131 = phi i64 [ 0, %then0__2 ], [ %139, %exiting__3 ]
+ %132 = icmp sle i64 %131, %23
+ br i1 %132, label %body__3, label %exit__3
+
+body__3: ; preds = %header__3
+ %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 %131)
+ %134 = bitcast i8* %133 to i64*
+ %idx = load i64, i64* %134, align 4
+ %135 = load %Array*, %Array** %op, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %135, i32 -1)
+ %136 = call %Array* @__quantum__rt__array_copy(%Array* %135, i1 false)
+ %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %136, i64 %idx)
+ %138 = bitcast i8* %137 to i2*
+ store i2 -2, i2* %138, align 1
+ call void @__quantum__rt__array_update_alias_count(%Array* %136, i32 1)
+ store %Array* %136, %Array** %op, align 8
+ call void @__quantum__rt__array_update_reference_count(%Array* %135, i32 -1)
+ br label %exiting__3
+
+exiting__3: ; preds = %body__3
+ %139 = add i64 %131, 1
+ br label %header__3
+
+exit__3: ; preds = %header__3
+ call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1)
+ %140 = call %Array* @__quantum__rt__array_copy(%Array* %4, i1 false)
+ %141 = load %Array*, %Array** %op, align 8
+ %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %140, i64 0)
+ %143 = bitcast i8* %142 to %Array**
+ call void @__quantum__rt__array_update_alias_count(%Array* %141, i32 1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %141, i32 1)
+ %144 = load %Array*, %Array** %143, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %144, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %144, i32 -1)
+ store %Array* %141, %Array** %143, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %140, i32 1)
+ store %Array* %140, %Array** %ops, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %141, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %141, i32 -1)
+ br label %continue__2
+
+header__4: ; preds = %exiting__4, %then1__2
+ %145 = phi i64 [ 0, %then1__2 ], [ %150, %exiting__4 ]
+ %146 = icmp sle i64 %145, 7
+ br i1 %146, label %body__4, label %exit__4
+
+body__4: ; preds = %header__4
+ %147 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 %145)
+ %148 = bitcast i8* %147 to %Array**
+ %149 = load %Array*, %Array** %148, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %149, i32 1)
+ br label %exiting__4
+
+exiting__4: ; preds = %body__4
+ %150 = add i64 %145, 1
+ br label %header__4
+
+exit__4: ; preds = %header__4
+ call void @__quantum__rt__array_update_alias_count(%Array* %compactOps, i32 1)
+ br label %header__5
+
+header__5: ; preds = %exiting__5, %exit__4
+ %iOp = phi i64 [ 0, %exit__4 ], [ %159, %exiting__5 ]
+ %151 = icmp sle i64 %iOp, 7
+ br i1 %151, label %body__5, label %exit__5
+
+body__5: ; preds = %header__5
+ %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 %iOp)
+ %153 = bitcast i8* %152 to %Array**
+ %154 = load %Array*, %Array** %153, align 8
+ store %Array* %154, %Array** %compactOp, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %154, i32 1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %154, i32 1)
+ %155 = call %Array* @Microsoft__Quantum__Arrays___8023f18e08eb4c09a8a8acf673dba09b_ConstantArray__body(i64 %nQubits, i2 0)
+ store %Array* %155, %Array** %op__1, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %155, i32 1)
+ %156 = call %Array* @Microsoft__Quantum__Arrays___00d59157a6454ecdaf64b45c69ab4afd_Zipped__body(%Array* %indices, %Array* %154)
+ %157 = call i64 @__quantum__rt__array_get_size_1d(%Array* %156)
+ %158 = sub i64 %157, 1
+ br label %header__6
+
+exiting__5: ; preds = %exit__9
+ %159 = add i64 %iOp, 1
+ br label %header__5
+
+exit__5: ; preds = %header__5
+ br label %header__10
+
+header__6: ; preds = %exiting__6, %body__5
+ %160 = phi i64 [ 0, %body__5 ], [ %171, %exiting__6 ]
+ %161 = icmp sle i64 %160, %158
+ br i1 %161, label %body__6, label %exit__6
+
+body__6: ; preds = %header__6
+ %162 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %156, i64 %160)
+ %163 = bitcast i8* %162 to { i64, i2 }**
+ %164 = load { i64, i2 }*, { i64, i2 }** %163, align 8
+ %165 = getelementptr inbounds { i64, i2 }, { i64, i2 }* %164, i32 0, i32 0
+ %idx__1 = load i64, i64* %165, align 4
+ %166 = getelementptr inbounds { i64, i2 }, { i64, i2 }* %164, i32 0, i32 1
+ %pauli = load i2, i2* %166, align 1
+ %167 = load %Array*, %Array** %op__1, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %167, i32 -1)
+ %168 = call %Array* @__quantum__rt__array_copy(%Array* %167, i1 false)
+ %169 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %168, i64 %idx__1)
+ %170 = bitcast i8* %169 to i2*
+ store i2 %pauli, i2* %170, align 1
+ call void @__quantum__rt__array_update_alias_count(%Array* %168, i32 1)
+ store %Array* %168, %Array** %op__1, align 8
+ call void @__quantum__rt__array_update_reference_count(%Array* %167, i32 -1)
+ br label %exiting__6
+
+exiting__6: ; preds = %body__6
+ %171 = add i64 %160, 1
+ br label %header__6
+
+exit__6: ; preds = %header__6
+ %172 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0)
+ %173 = bitcast i8* %172 to i64*
+ %174 = load i64, i64* %173, align 4
+ %175 = add i64 %174, 1
+ %176 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 1)
+ %177 = bitcast i8* %176 to i64*
+ %178 = load i64, i64* %177, align 4
+ %179 = sub i64 %178, 1
+ br label %header__7
+
+header__7: ; preds = %exiting__7, %exit__6
+ %i = phi i64 [ %175, %exit__6 ], [ %185, %exiting__7 ]
+ %180 = icmp sle i64 %i, %179
+ br i1 %180, label %body__7, label %exit__7
+
+body__7: ; preds = %header__7
+ %181 = load %Array*, %Array** %op__1, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %181, i32 -1)
+ %182 = call %Array* @__quantum__rt__array_copy(%Array* %181, i1 false)
+ %183 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %182, i64 %i)
+ %184 = bitcast i8* %183 to i2*
+ store i2 -2, i2* %184, align 1
+ call void @__quantum__rt__array_update_alias_count(%Array* %182, i32 1)
+ store %Array* %182, %Array** %op__1, align 8
+ call void @__quantum__rt__array_update_reference_count(%Array* %181, i32 -1)
+ br label %exiting__7
+
+exiting__7: ; preds = %body__7
+ %185 = add i64 %i, 1
+ br label %header__7
+
+exit__7: ; preds = %header__7
+ %186 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 2)
+ %187 = bitcast i8* %186 to i64*
+ %188 = load i64, i64* %187, align 4
+ %189 = add i64 %188, 1
+ %190 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 3)
+ %191 = bitcast i8* %190 to i64*
+ %192 = load i64, i64* %191, align 4
+ %193 = sub i64 %192, 1
+ br label %header__8
+
+header__8: ; preds = %exiting__8, %exit__7
+ %i__1 = phi i64 [ %189, %exit__7 ], [ %199, %exiting__8 ]
+ %194 = icmp sle i64 %i__1, %193
+ br i1 %194, label %body__8, label %exit__8
+
+body__8: ; preds = %header__8
+ %195 = load %Array*, %Array** %op__1, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %195, i32 -1)
+ %196 = call %Array* @__quantum__rt__array_copy(%Array* %195, i1 false)
+ %197 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %196, i64 %i__1)
+ %198 = bitcast i8* %197 to i2*
+ store i2 -2, i2* %198, align 1
+ call void @__quantum__rt__array_update_alias_count(%Array* %196, i32 1)
+ store %Array* %196, %Array** %op__1, align 8
+ call void @__quantum__rt__array_update_reference_count(%Array* %195, i32 -1)
+ br label %exiting__8
+
+exiting__8: ; preds = %body__8
+ %199 = add i64 %i__1, 1
+ br label %header__8
+
+exit__8: ; preds = %header__8
+ %200 = load %Array*, %Array** %ops, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %200, i32 -1)
+ %201 = call %Array* @__quantum__rt__array_copy(%Array* %200, i1 false)
+ %202 = load %Array*, %Array** %op__1, align 8
+ %203 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %201, i64 %iOp)
+ %204 = bitcast i8* %203 to %Array**
+ call void @__quantum__rt__array_update_alias_count(%Array* %202, i32 1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %202, i32 1)
+ %205 = load %Array*, %Array** %204, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %205, i32 -1)
+ call void @__quantum__rt__array_update_reference_count(%Array* %205, i32 -1)
+ store %Array* %202, %Array** %204, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %201, i32 1)
+ store %Array* %201, %Array** %ops, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %154, i32 -1)
+ call void @__quantum__rt__array_update_alias_count(%Array* %202, i32 -1)
+ %206 = sub i64 %157, 1
+ br label %header__9
+ +header__9: ; preds = %exiting__9, %exit__8 + %207 = phi i64 [ 0, %exit__8 ], [ %213, %exiting__9 ] + %208 = icmp sle i64 %207, %206 + br i1 %208, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %209 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %156, i64 %207) + %210 = bitcast i8* %209 to { i64, i2 }** + %211 = load { i64, i2 }*, { i64, i2 }** %210, align 8 + %212 = bitcast { i64, i2 }* %211 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %212, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %213 = add i64 %207, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_reference_count(%Array* %156, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %200, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %154, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %202, i32 -1) + br label %exiting__5 + +header__10: ; preds = %exiting__10, %exit__5 + %214 = phi i64 [ 0, %exit__5 ], [ %219, %exiting__10 ] + %215 = icmp sle i64 %214, 7 + br i1 %215, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %216 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 %214) + %217 = bitcast i8* %216 to %Array** + %218 = load %Array*, %Array** %217, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %218, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %219 = add i64 %214, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %compactOps, i32 -1) + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %220 = phi i64 [ 0, %exit__10 ], [ %225, %exiting__11 ] + %221 = icmp sle i64 %220, 7 + br i1 %221, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %222 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 %220) + %223 = bitcast i8* %222 to %Array** + %224 = load %Array*, %Array** %223, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %224, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %225 = add i64 %220, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_reference_count(%Array* %compactOps, i32 -1) + br label %continue__2 + +header__12: ; preds = %exiting__12, %then2__1 + %226 = phi i64 [ 0, %then2__1 ], [ %231, %exiting__12 ] + %227 = icmp sle i64 %226, 1 + br i1 %227, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %228 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps__1, i64 %226) + %229 = bitcast i8* %228 to %Array** + %230 = load %Array*, %Array** %229, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %230, i32 1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %231 = add i64 %226, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %compactOps__1, i32 1) + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %iOp__1 = phi i64 [ 0, %exit__12 ], [ %266, %exiting__13 ] + %232 = icmp sle i64 %iOp__1, 1 + br i1 %232, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %233 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps__1, i64 %iOp__1) + %234 = bitcast i8* %233 to %Array** + %235 = load %Array*, %Array** %234, 
align 8 + store %Array* %235, %Array** %compactOp__1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %235, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %235, i32 1) + %236 = call %Array* @Microsoft__Quantum__Arrays___8023f18e08eb4c09a8a8acf673dba09b_ConstantArray__body(i64 %nQubits, i2 0) + store %Array* %236, %Array** %op__2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %236, i32 1) + %nIndices = call i64 @__quantum__rt__array_get_size_1d(%Array* %indices) + call void @__quantum__rt__array_update_alias_count(%Array* %236, i32 -1) + %237 = call %Array* @__quantum__rt__array_copy(%Array* %236, i1 false) + %238 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %235, i64 0) + %239 = bitcast i8* %238 to i2* + %240 = load i2, i2* %239, align 1 + %241 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %242 = bitcast i8* %241 to i64* + %243 = load i64, i64* %242, align 4 + %244 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %237, i64 %243) + %245 = bitcast i8* %244 to i2* + store i2 %240, i2* %245, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %237, i32 1) + store %Array* %237, %Array** %op__2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %237, i32 -1) + %246 = call %Array* @__quantum__rt__array_copy(%Array* %237, i1 false) + %247 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %235, i64 1) + %248 = bitcast i8* %247 to i2* + %249 = load i2, i2* %248, align 1 + %250 = sub i64 %nIndices, 1 + %251 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 %250) + %252 = bitcast i8* %251 to i64* + %253 = load i64, i64* %252, align 4 + %254 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %246, i64 %253) + %255 = bitcast i8* %254 to i2* + %256 = load i2, i2* %255, align 1 + store i2 %249, i2* %255, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %246, i32 1) + store %Array* %246, %Array** %op__2, align 8 + %257 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %258 = bitcast i8* %257 to i64* + %259 = load i64, i64* %258, align 4 + %260 = add i64 %259, 1 + %261 = sub i64 %nIndices, 1 + %262 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 %261) + %263 = bitcast i8* %262 to i64* + %264 = load i64, i64* %263, align 4 + %265 = sub i64 %264, 1 + br label %header__14 + +exiting__13: ; preds = %continue__3 + %266 = add i64 %iOp__1, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + br label %header__15 + +header__14: ; preds = %exiting__14, %body__13 + %i__2 = phi i64 [ %260, %body__13 ], [ %272, %exiting__14 ] + %267 = icmp sle i64 %i__2, %265 + br i1 %267, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %268 = load %Array*, %Array** %op__2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %268, i32 -1) + %269 = call %Array* @__quantum__rt__array_copy(%Array* %268, i1 false) + %270 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %269, i64 %i__2) + %271 = bitcast i8* %270 to i2* + store i2 -2, i2* %271, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %269, i32 1) + store %Array* %269, %Array** %op__2, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %268, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %272 = add i64 %i__2, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + %273 = icmp eq 
i64 %nIndices, 4 + br i1 %273, label %then0__3, label %continue__3 + +then0__3: ; preds = %exit__14 + %274 = load %Array*, %Array** %op__2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %274, i32 -1) + %275 = call %Array* @__quantum__rt__array_copy(%Array* %274, i1 false) + %276 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %277 = bitcast i8* %276 to i64* + %278 = load i64, i64* %277, align 4 + %279 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 1) + %280 = bitcast i8* %279 to i64* + %281 = load i64, i64* %280, align 4 + %282 = icmp slt i64 %278, %281 + br i1 %282, label %condTrue__1, label %condContinue__2 + +condTrue__1: ; preds = %then0__3 + %283 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 1) + %284 = bitcast i8* %283 to i64* + %285 = load i64, i64* %284, align 4 + %286 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 3) + %287 = bitcast i8* %286 to i64* + %288 = load i64, i64* %287, align 4 + %289 = icmp slt i64 %285, %288 + br label %condContinue__2 + +condContinue__2: ; preds = %condTrue__1, %then0__3 + %290 = phi i1 [ %289, %condTrue__1 ], [ %282, %then0__3 ] + %291 = select i1 %290, i2 0, i2 -2 + %292 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 1) + %293 = bitcast i8* %292 to i64* + %294 = load i64, i64* %293, align 4 + %295 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %275, i64 %294) + %296 = bitcast i8* %295 to i2* + store i2 %291, i2* %296, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %275, i32 1) + store %Array* %275, %Array** %op__2, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %274, i32 -1) + br label %continue__3 + +continue__3: ; preds = %condContinue__2, %exit__14 + %297 = load %Array*, %Array** %ops, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %297, i32 -1) + %298 = call %Array* @__quantum__rt__array_copy(%Array* %297, i1 false) + %299 = load %Array*, %Array** %op__2, align 8 + %300 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %298, i64 %iOp__1) + %301 = bitcast i8* %300 to %Array** + call void @__quantum__rt__array_update_alias_count(%Array* %299, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %299, i32 1) + %302 = load %Array*, %Array** %301, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %302, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %302, i32 -1) + store %Array* %299, %Array** %301, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %298, i32 1) + store %Array* %298, %Array** %ops, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %235, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %299, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %236, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %237, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %297, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %235, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %299, i32 -1) + br label %exiting__13 + +header__15: ; preds = %exiting__15, %exit__13 + %303 = phi i64 [ 0, %exit__13 ], [ %308, %exiting__15 ] + %304 = icmp sle i64 %303, 1 + br i1 %304, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %305 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps__1, i64 %303) + %306 = bitcast i8* %305 to %Array** + %307 = load %Array*, %Array** %306, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %307, i32 -1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %308 = add i64 %303, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_alias_count(%Array* %compactOps__1, i32 -1) + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %309 = phi i64 [ 0, %exit__15 ], [ %314, %exiting__16 ] + %310 = icmp sle i64 %309, 1 + br i1 %310, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %311 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps__1, i64 %309) + %312 = bitcast i8* %311 to %Array** + %313 = load %Array*, %Array** %312, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %313, i32 -1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %314 = add i64 %309, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_reference_count(%Array* %compactOps__1, i32 -1) + br label %continue__2 + +header__17: ; preds = %exiting__17, %continue__2 + %315 = phi i64 [ 0, %continue__2 ], [ %320, %exiting__17 ] + %316 = icmp sle i64 %315, %130 + br i1 %316, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %317 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %128, i64 %315) + %318 = bitcast i8* %317 to %Array** + %319 = load %Array*, %Array** %318, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %319, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %320 = add i64 %315, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %128, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %2, i32 -1) + ret %Array* %128 +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner__VQE__ExpandedCoefficients__body(%Array* %coeff, i64 %termType) { +entry: + %coeffs = alloca %Array*, align 8 + %nCoeffs = alloca i64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + store i64 0, i64* %nCoeffs, align 4 + %0 = icmp eq i64 %termType, 2 + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + store i64 2, i64* %nCoeffs, align 4 + br label %continue__1 + +test1__1: ; preds = %entry + %1 = icmp eq i64 %termType, 3 + br i1 %1, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + store i64 8, i64* %nCoeffs, align 4 + br label %continue__1 + +else__1: ; preds = %test1__1 + store i64 1, i64* %nCoeffs, align 4 + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + %2 = load i64, i64* %nCoeffs, align 4 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %2) + %4 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %5 = phi i64 [ 0, %continue__1 ], [ %9, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 %5) + %8 = bitcast i8* %7 to double* + store double 0.000000e+00, double* %8, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %3, %Array** %coeffs, align 8 + 
call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %10 = icmp eq i64 %termType, 0 + br i1 %10, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %exit__1 + %11 = icmp eq i64 %termType, 1 + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %exit__1 + %12 = phi i1 [ %10, %exit__1 ], [ %11, %condFalse__1 ] + br i1 %12, label %then0__2, label %test1__2 + +then0__2: ; preds = %condContinue__1 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + %13 = call %Array* @__quantum__rt__array_copy(%Array* %3, i1 false) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %15 = bitcast i8* %14 to double* + %16 = load double, double* %15, align 8 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 0) + %18 = bitcast i8* %17 to double* + store double %16, double* %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + store %Array* %13, %Array** %coeffs, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + br label %continue__2 + +test1__2: ; preds = %condContinue__1 + %19 = icmp eq i64 %termType, 2 + br i1 %19, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %test1__2 + %20 = icmp eq i64 %termType, 3 + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %test1__2 + %21 = phi i1 [ %19, %test1__2 ], [ %20, %condFalse__2 ] + br i1 %21, label %then1__2, label %continue__2 + +then1__2: ; preds = %condContinue__2 + %22 = sub i64 %2, 1 + br label %header__2 + +continue__2: ; preds = %exit__2, %condContinue__2, %then0__2 + %23 = load %Array*, %Array** %coeffs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + ret %Array* %23 + +header__2: ; preds = %exiting__2, %then1__2 + %i = phi i64 [ 0, %then1__2 ], [ %33, %exiting__2 ] + %24 = icmp sle i64 %i, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = load %Array*, %Array** %coeffs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + %26 = call %Array* @__quantum__rt__array_copy(%Array* %25, i1 false) + %27 = sdiv i64 %i, 2 + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 %27) + %29 = bitcast i8* %28 to double* + %30 = load double, double* %29, align 8 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %26, i64 %i) + %32 = bitcast i8* %31 to double* + store double %30, double* %32, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + store %Array* %26, %Array** %coeffs, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %33 = add i64 %i, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + br label %continue__2 +} + +define internal void @Lifted__PartialApplication__41__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Array* }*, { i64, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 
= call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Array* }*, %Array* }* getelementptr ({ { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { i64, %Array* }*, %Array* }* + %8 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %7, i32 0, i32 1 + store { i64, %Array* }* %2, { i64, %Array* }** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__41__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Array* }*, { i64, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Array* }*, %Array* }* getelementptr ({ { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { i64, %Array* }*, %Array* }* + %8 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %7, i32 0, i32 1 + store { i64, %Array* }* %2, { i64, %Array* }** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = load { i64, %Array* }*, { i64, %Array* }** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____body({ i64, %Array* }* %3, %Array* %4) 
+ ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = load { i64, %Array* }*, { i64, %Array* }** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____adj({ i64, %Array* }* %3, %Array* %4) + ret void +} + +define internal void @MemoryManagement__26__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Array* }*, { i64, %Array* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %4, i32 0, i32 1 + %6 = load %Array*, %Array** %5, align 8 + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %9 = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %9) + %12 = bitcast i8* %11 to { { double, double }*, %Array* }** + %13 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %12, align 8 + %14 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %13, i32 0, i32 0 + %15 = load { double, double }*, { double, double }** %14, align 8 + %16 = bitcast { double, double }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 %count-change) + %17 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %13, i32 0, i32 1 + %18 = load %Array*, %Array** %17, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 %count-change) + %19 = bitcast { { double, double }*, %Array* }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %9, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 %count-change) + %21 = bitcast { i64, %Array* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__26__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Array* }* }* + %1 = getelementptr inbounds 
{ %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Array* }*, { i64, %Array* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %4, i32 0, i32 1 + %6 = load %Array*, %Array** %5, align 8 + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %9 = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %9) + %12 = bitcast i8* %11 to { { double, double }*, %Array* }** + %13 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %12, align 8 + %14 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %13, i32 0, i32 0 + %15 = load { double, double }*, { double, double }** %14, align 8 + %16 = bitcast { double, double }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 %count-change) + %17 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %13, i32 0, i32 1 + %18 = load %Array*, %Array** %17, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 %count-change) + %19 = bitcast { { double, double }*, %Array* }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %19, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %9, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 %count-change) + %21 = bitcast { i64, %Array* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal double @Microsoft__Quantum__Chemistry__JordanWigner__VQE__EstimateTermExpectation__body(%Callable* %inputStateUnitary, %Array* %ops, %Array* %coeffs, i64 %nQubits, i64 %nSamples) { +entry: + %jwTermEnergy = alloca double, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %inputStateUnitary, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inputStateUnitary, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ops) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %coeffs, i32 1) + store double 0.000000e+00, double* %jwTermEnergy, align 8 + %8 = call %Array* @Microsoft__Quantum__Arrays___1d2b34a15cf5490eb8142fe0e14c514a_Zipped__body(%Array* %coeffs, %Array* %ops) + %9 = call i64 @__quantum__rt__array_get_size_1d(%Array* %8) + %10 = sub i64 %9, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %11 = phi i64 [ 0, %exit__1 ], [ %31, %exiting__2 ] + %12 = icmp sle i64 %11, %10 + br i1 %12, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 %11) + %14 = bitcast i8* %13 to { double, %Array* }** + %15 = load { double, %Array* }*, { double, %Array* }** %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %15, i32 0, i32 0 + %coeff = load double, double* %16, align 8 + %17 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %15, i32 0, i32 1 + %op = load %Array*, %Array** %17, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %18 = call double @Microsoft__Quantum__Math__AbsD__body(double %coeff) + %19 = fcmp oge double %18, 1.000000e-10 + br i1 %19, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__2 + %20 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Measure__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %op, i32 1) + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Callable*, %Array* }* + %23 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %22, i32 0, i32 1 + store %Callable* %20, %Callable** %23, align 8 + store %Array* %op, %Array** %24, align 8 + %25 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__42__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__27__FunctionTable, %Tuple* %21) + %termExpectation = call double @Microsoft__Quantum__Characterization__EstimateFrequencyA__body(%Callable* %inputStateUnitary, %Callable* %25, i64 %nQubits, i64 %nSamples) + %26 = load double, double* %jwTermEnergy, align 8 + %27 = fmul double 2.000000e+00, %termExpectation + %28 = fsub double %27, 1.000000e+00 + %29 = fmul double %28, %coeff + %30 = fadd double %26, %29 + store double %30, double* %jwTermEnergy, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %25, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %25, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__2 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %continue__1 + %31 = add i64 %11, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %32 = load double, double* %jwTermEnergy, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %inputStateUnitary, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inputStateUnitary, i32 -1) + %33 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %34 = phi i64 [ 0, 
%exit__2 ], [ %39, %exiting__3 ] + %35 = icmp sle i64 %34, %33 + br i1 %35, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %34) + %37 = bitcast i8* %36 to %Array** + %38 = load %Array*, %Array** %37, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %38, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %39 = add i64 %34, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeffs, i32 -1) + %40 = sub i64 %9, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %41 = phi i64 [ 0, %exit__3 ], [ %49, %exiting__4 ] + %42 = icmp sle i64 %41, %40 + br i1 %42, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 %41) + %44 = bitcast i8* %43 to { double, %Array* }** + %45 = load { double, %Array* }*, { double, %Array* }** %44, align 8 + %46 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %45, i32 0, i32 1 + %47 = load %Array*, %Array** %46, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %47, i32 -1) + %48 = bitcast { double, %Array* }* %45 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %48, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %49 = add i64 %41, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 -1) + ret double %32 +} + +define internal void @Lifted__PartialApplication__42__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Measure__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = call %Result* 
@Microsoft__Quantum__Intrinsic__Measure__body(%Array* %3, %Array* %4) + %6 = bitcast %Tuple* %result-tuple to { %Result* }* + %7 = getelementptr inbounds { %Result* }, { %Result* }* %6, i32 0, i32 0 + store %Result* %5, %Result** %7, align 8 + ret void +} + +define internal void @MemoryManagement__27__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__27__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermsToGenIdx__body(%Array* %data, %Array* %termType, i64 %idx) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %data) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %2) + %5 = bitcast i8* %4 to { %Array*, %Array* }** + %6 = load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 1) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %idx) + %14 = bitcast i8* %13 to { %Array*, 
%Array* }** + %15 = load { %Array*, %Array* }*, { %Array*, %Array* }** %14, align 8 + %16 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermToGenIdx__body({ %Array*, %Array* }* %15, %Array* %termType) + %17 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %18 = phi i64 [ 0, %exit__1 ], [ %28, %exiting__2 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %18) + %21 = bitcast i8* %20 to { %Array*, %Array* }** + %22 = load { %Array*, %Array* }*, { %Array*, %Array* }** %21, align 8 + %23 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %22, i32 0, i32 0 + %24 = load %Array*, %Array** %23, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %24, i32 -1) + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %22, i32 0, i32 1 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 -1) + %27 = bitcast { %Array*, %Array* }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %28 = add i64 %18, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %16 +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermToGenIdx__body({ %Array*, %Array* }* %term, %Array* %termType) { +entry: + %0 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %term, i32 0, i32 0 + %idxFermions = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %term, i32 0, i32 1 + %coeff = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %2 = bitcast { %Array*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %termType, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %coeff, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, %Array* }* + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + store %Array* %termType, %Array** %5, align 8 + store %Array* %coeff, %Array** %6, align 8 + %7 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %4, %Array* %idxFermions) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %termType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %7 +} + +define internal void @Lifted__PartialApplication__43__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { i64 }* + %6 = getelementptr inbounds { i64 }, { i64 }* %5, i32 0, i32 0 + %7 = load i64, i64* %6, align 4 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, i64 }* getelementptr ({ %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array*, i64 }* + %10 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %9, i32 0, i32 2 + store %Array* %2, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + store i64 %7, i64* %12, align 4 + %13 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__HTermsToGenIdx__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array*, i64 }* + %1 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %1, align 8 + %5 = load %Array*, %Array** %2, align 8 + %6 = load i64, i64* %3, align 4 + %7 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermsToGenIdx__body(%Array* %4, %Array* %5, i64 %6) + %8 = bitcast %Tuple* %result-tuple to { { { %Array*, %Array* }*, %Array* }* }* + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %8, i32 0, i32 0 + store { { %Array*, %Array* }*, %Array* }* %7, { { %Array*, %Array* }*, %Array* }** %9, align 8 + ret void +} + +define internal void @MemoryManagement__28__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, 
%Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { %Array*, %Array* }** + %11 = load { %Array*, %Array* }*, { %Array*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 0 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 %count-change) + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 %count-change) + %16 = bitcast { %Array*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %18 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %19 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__28__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { %Array*, %Array* }** + %11 = load { %Array*, %Array* }*, { %Array*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 0 + %13 = load %Array*, %Array** %12, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %13, i32 %count-change) + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 %count-change) + %16 = bitcast { %Array*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %18 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %19 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +attributes #0 = { nofree nosync nounwind readnone speculatable willreturn } diff --git a/src/munchkin/tests/qsharp/hydrogen-sim/qir/hydrogen-sim.ll b/src/munchkin/tests/qsharp/hydrogen-sim/qir/hydrogen-sim.ll new file mode 100644 index 0000000..21612f2 --- /dev/null +++ b/src/munchkin/tests/qsharp/hydrogen-sim/qir/hydrogen-sim.ll @@ -0,0 +1,51198 @@ + +%Tuple = type opaque +%Array = type opaque +%Callable = type opaque +%Range = type { i64, i64, i64 } +%Qubit = type opaque +%Result = type opaque +%String = type opaque + +@PartialApplication__1__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__1__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__1__RefCount, void (%Tuple*, i32)* @MemoryManagement__1__AliasCount] +@PartialApplication__2__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Characterization__RobustPhaseEstimation__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Characterization__RobustPhaseEstimation__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__2__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__2__RefCount, void (%Tuple*, i32)* @MemoryManagement__2__AliasCount] +@PartialApplication__3__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, 
%Tuple*)* null] +@PartialApplication__4__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@0 = internal constant [46 x i8] c"Unitary coupled-cluster PQRS failed: indices \00" +@1 = internal constant [3 x i8] c", \00" +@2 = internal constant [18 x i8] c" must be distinct\00" +@3 = internal constant [44 x i8] c"Unitary coupled-cluster PQ failed: indices \00" +@PartialApplication__5__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctladj__wrapper] +@Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorImpl____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorImpl____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorImpl____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorImpl____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorImpl____ctladj__wrapper] +@MemoryManagement__3__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__3__RefCount, void (%Tuple*, i32)* @MemoryManagement__3__AliasCount] +@PartialApplication__6__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctladj__wrapper] +@Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctladj__wrapper] +@MemoryManagement__4__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__4__RefCount, void (%Tuple*, i32)* @MemoryManagement__4__AliasCount] +@4 = internal constant [86 x i8] c"ComputeJordanWignerString failed. `idxFermions` must contain an even number of terms.\00" +@5 = internal constant [46 x i8] c"ComputeJordanWignerString failed. 
fermionIdx \00" +@6 = internal constant [15 x i8] c" out of range.\00" +@7 = internal constant [47 x i8] c"Completely invalid cluster operator specified.\00" +@Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorFunction____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorFunction____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__7__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Chemistry__JordanWigner____QsRef0__JordanWignerStateAsGeneratorIndex____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0__JordanWignerStateAsGeneratorIndex____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__5__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__5__RefCount, void (%Tuple*, i32)* @MemoryManagement__5__AliasCount] +@Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionFunction__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionFunction__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__8__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__ctladj__wrapper] +@Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionImpl__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionImpl__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionImpl__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionImpl__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionImpl__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__X__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper] +@Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___PrepareSingleConfigurationalStateSingleSiteOccupation____FunctionTable = internal 
constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___PrepareSingleConfigurationalStateSingleSiteOccupation____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__ctladj__wrapper] +@PartialApplication__9__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__6__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__6__RefCount, void (%Tuple*, i32)* @MemoryManagement__6__AliasCount] +@PartialApplication__10__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctladj__wrapper] +@MemoryManagement__7__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__7__RefCount, void (%Tuple*, i32)* @MemoryManagement__7__AliasCount] +@PartialApplication__11__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctladj__wrapper] +@Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___MergeTwoRegisters_____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___MergeTwoRegisters_____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___MergeTwoRegisters_____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___MergeTwoRegisters_____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___MergeTwoRegisters_____ctladj__wrapper] +@MemoryManagement__8__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__8__RefCount, void (%Tuple*, i32)* @MemoryManagement__8__AliasCount] +@PartialApplication__12__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__body__wrapper, void (%Tuple*, 
%Tuple*, %Tuple*)* @Lifted__PartialApplication__12__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctladj__wrapper] +@PartialApplication__13__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__13__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Chemistry__HTermsToGenIdx__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__HTermsToGenIdx__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__9__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__9__RefCount, void (%Tuple*, i32)* @MemoryManagement__9__AliasCount] +@8 = internal constant [59 x i8] c"PNorm failed. `p` must be a positive real number, but was \00" +@9 = internal constant [2 x i8] c".\00" +@10 = internal constant [46 x i8] c"`Length(bits)` must be less than 64, but was \00" +@11 = internal constant [33 x i8] c"`bits` must be between 0 and 63 \00" +@12 = internal constant [34 x i8] c"`number` must be between 0 and 2^\00" +@13 = internal constant [15 x i8] c" - 1, but was \00" +@14 = internal constant [38 x i8] c"Unexpected number of auxiliary qubits\00" +@Microsoft__Quantum__Canon__ApplyAnd__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyAnd__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyAnd__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyAnd__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyAnd__ctladj__wrapper] +@15 = internal constant [43 x i8] c"Probability of the measurement must be 0.5\00" +@Microsoft__Quantum__Canon___b950c6e85b1944ae91a2dee4f20f4c18_Fst__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___b950c6e85b1944ae91a2dee4f20f4c18_Fst__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Canon__ApplyP__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyP__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyP__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyP__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyP__ctladj__wrapper] +@16 = internal constant [75 x i8] c"operation ApplyDiagonalUnitary -- Number of qubits must be greater than 0.\00" +@PartialApplication__14__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__14__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__14__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__14__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__14__ctladj__wrapper] 
+@Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj__wrapper] +@MemoryManagement__10__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__10__RefCount, void (%Tuple*, i32)* @MemoryManagement__10__AliasCount] +@PartialApplication__15__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__15__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__15__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__15__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__15__ctladj__wrapper] +@Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj__wrapper] +@MemoryManagement__11__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__11__RefCount, void (%Tuple*, i32)* @MemoryManagement__11__AliasCount] +@Microsoft__Quantum__Intrinsic__H__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__ctladj__wrapper] +@PartialApplication__16__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__16__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__16__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__16__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__16__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__S__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__ctladj__wrapper] +@17 = internal constant [38 x i8] c"MultiplexPauli failed. 
Invalid pauli \00" +@18 = internal constant [7 x i8] c"PauliX\00" +@19 = internal constant [7 x i8] c"PauliY\00" +@20 = internal constant [7 x i8] c"PauliZ\00" +@21 = internal constant [7 x i8] c"PauliI\00" +@PartialApplication__17__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__ctladj__wrapper] +@PartialApplication__18__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__18__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__18__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__18__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__18__ctladj__wrapper] +@PartialApplication__19__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__ctladj__wrapper] +@PartialApplication__20__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__ctladj__wrapper] +@PartialApplication__21__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__ctladj__wrapper] +@PartialApplication__22__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__ctladj__wrapper] +@PartialApplication__23__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__23__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__23__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__23__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__23__ctladj__wrapper] +@PartialApplication__24__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__ctl__wrapper, 
void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__ctladj__wrapper] +@PartialApplication__25__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__ctladj__wrapper] +@PartialApplication__26__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__ctladj__wrapper] +@Microsoft__Quantum__Canon___dae13232a52742a7a1f5472999176521_MultiplexOperationsBruteForceFromGenerator__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___dae13232a52742a7a1f5472999176521_MultiplexOperationsBruteForceFromGenerator__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___dae13232a52742a7a1f5472999176521_MultiplexOperationsBruteForceFromGenerator__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___dae13232a52742a7a1f5472999176521_MultiplexOperationsBruteForceFromGenerator__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___dae13232a52742a7a1f5472999176521_MultiplexOperationsBruteForceFromGenerator__ctladj__wrapper] +@MemoryManagement__12__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__12__RefCount, void (%Tuple*, i32)* @MemoryManagement__12__AliasCount] +@PartialApplication__27__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__27__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__27__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__27__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__27__ctladj__wrapper] +@Microsoft__Quantum__Canon___4b6c2741135544ffade7e32677715e11_MultiplexOperationsFromGenerator__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___4b6c2741135544ffade7e32677715e11_MultiplexOperationsFromGenerator__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___4b6c2741135544ffade7e32677715e11_MultiplexOperationsFromGenerator__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___4b6c2741135544ffade7e32677715e11_MultiplexOperationsFromGenerator__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___4b6c2741135544ffade7e32677715e11_MultiplexOperationsFromGenerator__ctladj__wrapper] +@MemoryManagement__13__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__13__RefCount, void (%Tuple*, i32)* @MemoryManagement__13__AliasCount] +@PartialApplication__28__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__28__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__ctladj__wrapper] +@Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__ctladj__wrapper] +@MemoryManagement__14__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__14__RefCount, void (%Tuple*, i32)* @MemoryManagement__14__AliasCount] +@PartialApplication__29__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__ctladj__wrapper] +@Microsoft__Quantum__Canon__RAll1__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__RAll1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__RAll1__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__RAll1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__RAll1__ctladj__wrapper] +@MemoryManagement__15__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__15__RefCount, void (%Tuple*, i32)* @MemoryManagement__15__AliasCount] +@PartialApplication__30__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__ctladj__wrapper] +@PartialApplication__31__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__31__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__31__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__31__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__31__ctladj__wrapper] +@PartialApplication__32__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__32__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__32__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__32__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__32__ctladj__wrapper] +@PartialApplication__33__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, 
%Tuple*)* @Lifted__PartialApplication__33__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__33__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__33__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__33__ctladj__wrapper] +@PartialApplication__34__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__34__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__34__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__34__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__34__ctladj__wrapper] +@PartialApplication__35__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__35__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__35__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__35__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__35__ctladj__wrapper] +@PartialApplication__36__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__36__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__36__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__36__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__36__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__R1__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__R1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__R1__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__R1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__R1__ctladj__wrapper] +@MemoryManagement__16__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__16__RefCount, void (%Tuple*, i32)* @MemoryManagement__16__AliasCount] +@PartialApplication__37__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__37__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__37__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__37__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__37__ctladj__wrapper] +@PartialApplication__38__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__38__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__38__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__38__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__38__ctladj__wrapper] +@PartialApplication__39__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__39__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__39__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__39__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__39__ctladj__wrapper] +@22 = internal constant [75 
x i8] c"MultiplexOperations failed. Number of index qubits must be greater than 0.\00" +@PartialApplication__40__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__40__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__40__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__40__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__40__ctladj__wrapper] +@Microsoft__Quantum__Canon___6234a07443684162b5952eda90654bb6_ApplyControlledOnInt__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___6234a07443684162b5952eda90654bb6_ApplyControlledOnInt__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___6234a07443684162b5952eda90654bb6_ApplyControlledOnInt__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___6234a07443684162b5952eda90654bb6_ApplyControlledOnInt__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___6234a07443684162b5952eda90654bb6_ApplyControlledOnInt__ctladj__wrapper] +@MemoryManagement__17__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__17__RefCount, void (%Tuple*, i32)* @MemoryManagement__17__AliasCount] +@PartialApplication__41__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__41__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__41__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__41__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__41__ctladj__wrapper] +@Microsoft__Quantum__Canon___f7b44f7729b14fef9eb4e6dc89a14654_ApplyControlledOnInt__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f7b44f7729b14fef9eb4e6dc89a14654_ApplyControlledOnInt__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f7b44f7729b14fef9eb4e6dc89a14654_ApplyControlledOnInt__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f7b44f7729b14fef9eb4e6dc89a14654_ApplyControlledOnInt__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f7b44f7729b14fef9eb4e6dc89a14654_ApplyControlledOnInt__ctladj__wrapper] +@MemoryManagement__18__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__18__RefCount, void (%Tuple*, i32)* @MemoryManagement__18__AliasCount] +@PartialApplication__42__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__42__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Canon___6d6f9db43d6b47c4a0fad10624f517d9___QsRef3__WithFirstInputAppliedCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___6d6f9db43d6b47c4a0fad10624f517d9___QsRef3__WithFirstInputAppliedCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__19__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] 
[void (%Tuple*, i32)* @MemoryManagement__19__RefCount, void (%Tuple*, i32)* @MemoryManagement__19__AliasCount] +@PartialApplication__43__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__43__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__43__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__43__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__43__ctladj__wrapper] +@Microsoft__Quantum__Canon___bb0a812d2b574442bf8d9673cc2bf3be_ApplyControlledOnBitString__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___bb0a812d2b574442bf8d9673cc2bf3be_ApplyControlledOnBitString__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___bb0a812d2b574442bf8d9673cc2bf3be_ApplyControlledOnBitString__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___bb0a812d2b574442bf8d9673cc2bf3be_ApplyControlledOnBitString__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___bb0a812d2b574442bf8d9673cc2bf3be_ApplyControlledOnBitString__ctladj__wrapper] +@MemoryManagement__20__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__20__RefCount, void (%Tuple*, i32)* @MemoryManagement__20__AliasCount] +@PartialApplication__44__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__44__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__44__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__44__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__44__ctladj__wrapper] +@Microsoft__Quantum__Canon___868340d48aba4d3d9872cb8fd11efca4_ApplyControlledOnBitString__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___868340d48aba4d3d9872cb8fd11efca4_ApplyControlledOnBitString__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___868340d48aba4d3d9872cb8fd11efca4_ApplyControlledOnBitString__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___868340d48aba4d3d9872cb8fd11efca4_ApplyControlledOnBitString__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___868340d48aba4d3d9872cb8fd11efca4_ApplyControlledOnBitString__ctladj__wrapper] +@MemoryManagement__21__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__21__RefCount, void (%Tuple*, i32)* @MemoryManagement__21__AliasCount] +@PartialApplication__45__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__45__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__45__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__45__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__45__ctladj__wrapper] +@MemoryManagement__22__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__22__RefCount, void (%Tuple*, i32)* @MemoryManagement__22__AliasCount] +@PartialApplication__46__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, 
%Tuple*)* @Lifted__PartialApplication__46__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__46__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__46__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__46__ctladj__wrapper] +@Microsoft__Quantum__Canon___8ecd591e14354dd1acb17a9a79b0fc4a___QsRef3__ApplyBoundCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___8ecd591e14354dd1acb17a9a79b0fc4a___QsRef3__ApplyBoundCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___8ecd591e14354dd1acb17a9a79b0fc4a___QsRef3__ApplyBoundCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___8ecd591e14354dd1acb17a9a79b0fc4a___QsRef3__ApplyBoundCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___8ecd591e14354dd1acb17a9a79b0fc4a___QsRef3__ApplyBoundCA____ctladj__wrapper] +@MemoryManagement__23__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__23__RefCount, void (%Tuple*, i32)* @MemoryManagement__23__AliasCount] +@23 = internal constant [47 x i8] c"Control register shorter than control pattern.\00" +@PartialApplication__47__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__47__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__47__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__47__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__47__ctladj__wrapper] +@Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____ctladj__wrapper] +@MemoryManagement__24__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__24__RefCount, void (%Tuple*, i32)* @MemoryManagement__24__AliasCount] +@PartialApplication__48__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__48__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__48__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__48__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__48__ctladj__wrapper] +@Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* 
@Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____ctladj__wrapper] +@PartialApplication__49__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__49__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__49__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__49__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__49__ctladj__wrapper] +@Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____ctladj__wrapper] +@MemoryManagement__25__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__25__RefCount, void (%Tuple*, i32)* @MemoryManagement__25__AliasCount] +@24 = internal constant [11 x i8] c"Odd order \00" +@25 = internal constant [20 x i8] c" not yet supported.\00" +@PartialApplication__50__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__50__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Canon___92b2fc57a79541c8b9df7a25eea41fad___QsRef3__ComposedOutput____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___92b2fc57a79541c8b9df7a25eea41fad___QsRef3__ComposedOutput____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__26__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__26__RefCount, void (%Tuple*, i32)* @MemoryManagement__26__AliasCount] +@26 = internal constant [36 x i8] c"Qubit in invalid state. 
Expecting: \00" +@27 = internal constant [2 x i8] c"\22\00" +@28 = internal constant [13 x i8] c"\0A\09Expected:\09\00" +@29 = internal constant [5 x i8] c"true\00" +@30 = internal constant [6 x i8] c"false\00" +@31 = internal constant [11 x i8] c"\0A\09Actual:\09\00" +@32 = internal constant [32 x i8] c"`to` must be larger than `from`\00" +@33 = internal constant [39 x i8] c"Array must be of the length at least 1\00" +@34 = internal constant [22 x i8] c"Index is out of bound\00" +@PartialApplication__51__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__51__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Arrays___7d83b54afca94675b63617b69b56aa7a_ElementAt__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Arrays___7d83b54afca94675b63617b69b56aa7a_ElementAt__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__27__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__27__RefCount, void (%Tuple*, i32)* @MemoryManagement__27__AliasCount] +@PartialApplication__52__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__52__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Arrays___b20df4913ab0459888bcf1448be084b3_ElementAt__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Arrays___b20df4913ab0459888bcf1448be084b3_ElementAt__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__28__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__28__RefCount, void (%Tuple*, i32)* @MemoryManagement__28__AliasCount] +@35 = internal constant [71 x i8] c"Specified output array length must be longer than `inputArray` length.\00" +@PartialApplication__53__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__53__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Simulation____QsRef3__IdxToCoeff____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__IdxToCoeff____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Simulation__PauliCoefficientFromGenIdx__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation__PauliCoefficientFromGenIdx__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__29__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__29__RefCount, void 
(%Tuple*, i32)* @MemoryManagement__29__AliasCount] +@PartialApplication__54__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__54__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Simulation____QsRef3__IdxToUnitary____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__IdxToUnitary____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Simulation____QsRef3__PauliLCUUnitary____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__PauliLCUUnitary____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__30__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__30__RefCount, void (%Tuple*, i32)* @MemoryManagement__30__AliasCount] +@PartialApplication__55__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__55__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__55__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__55__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__55__ctladj__wrapper] +@Microsoft__Quantum__Simulation____QsRef3__ApplyBlockEncodingFromBEandQubit____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__ApplyBlockEncodingFromBEandQubit____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__ApplyBlockEncodingFromBEandQubit____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__ApplyBlockEncodingFromBEandQubit____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__ApplyBlockEncodingFromBEandQubit____ctladj__wrapper] +@MemoryManagement__31__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__31__RefCount, void (%Tuple*, i32)* @MemoryManagement__31__AliasCount] +@PartialApplication__56__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__56__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__56__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__56__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__56__ctladj__wrapper] +@Microsoft__Quantum__Simulation____QsRef3__ApplyPauliLCUUnitary____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__ApplyPauliLCUUnitary____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__ApplyPauliLCUUnitary____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__ApplyPauliLCUUnitary____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* 
@Microsoft__Quantum__Simulation____QsRef3__ApplyPauliLCUUnitary____ctladj__wrapper] +@MemoryManagement__32__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__32__RefCount, void (%Tuple*, i32)* @MemoryManagement__32__AliasCount] +@PartialApplication__57__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__57__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Simulation____QsRef3___AddGeneratorSystems____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3___AddGeneratorSystems____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__33__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__33__RefCount, void (%Tuple*, i32)* @MemoryManagement__33__AliasCount] +@Microsoft__Quantum__Simulation__IdentityGeneratorIndex__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Preparation__PrepareArbitraryStateD__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__ctladj__wrapper] +@Microsoft__Quantum__Canon__MultiplexerFromGenerator__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__MultiplexerFromGenerator__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__58__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__58__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__58__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__58__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__58__ctladj__wrapper] +@Microsoft__Quantum__Simulation____QsRef3__ApplyQuantumWalkByQubitization____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__ApplyQuantumWalkByQubitization____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__ApplyQuantumWalkByQubitization____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__ApplyQuantumWalkByQubitization____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__ApplyQuantumWalkByQubitization____ctladj__wrapper] 
+@MemoryManagement__34__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__34__RefCount, void (%Tuple*, i32)* @MemoryManagement__34__AliasCount] +@Microsoft__Quantum__Simulation__AddGeneratorSystems__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation__AddGeneratorSystems__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__59__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__59__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__59__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__59__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__59__ctladj__wrapper] +@Microsoft__Quantum__Simulation____QsRef3__TrotterSimulationAlgorithmImpl____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__TrotterSimulationAlgorithmImpl____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__TrotterSimulationAlgorithmImpl____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__TrotterSimulationAlgorithmImpl____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__TrotterSimulationAlgorithmImpl____ctladj__wrapper] +@MemoryManagement__35__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__35__RefCount, void (%Tuple*, i32)* @MemoryManagement__35__AliasCount] +@PartialApplication__60__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__60__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__60__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__60__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__60__ctladj__wrapper] +@Microsoft__Quantum__Simulation____QsRef3__TrotterStepImpl____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__TrotterStepImpl____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__TrotterStepImpl____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__TrotterStepImpl____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef3__TrotterStepImpl____ctladj__wrapper] +@MemoryManagement__36__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__36__RefCount, void (%Tuple*, i32)* @MemoryManagement__36__AliasCount] +@PartialApplication__61__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__61__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__61__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__61__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__61__ctladj__wrapper] +@PartialApplication__62__FunctionTable = internal constant [4 x void (%Tuple*, 
%Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__62__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__62__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__62__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__62__ctladj__wrapper] +@MemoryManagement__37__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__37__RefCount, void (%Tuple*, i32)* @MemoryManagement__37__AliasCount] +@PartialApplication__63__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__63__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__63__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__63__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__63__ctladj__wrapper] +@PartialApplication__64__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__64__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__64__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__64__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__64__ctladj__wrapper] +@PartialApplication__65__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__65__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__65__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__65__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__65__ctladj__wrapper] +@PartialApplication__66__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__66__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__66__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__66__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__66__ctladj__wrapper] +@Microsoft__Quantum__Simulation___cf163a106d11461497cf5b799872fa06___QsRef3__ApplyBlockEncodingByLCU____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation___cf163a106d11461497cf5b799872fa06___QsRef3__ApplyBlockEncodingByLCU____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation___cf163a106d11461497cf5b799872fa06___QsRef3__ApplyBlockEncodingByLCU____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation___cf163a106d11461497cf5b799872fa06___QsRef3__ApplyBlockEncodingByLCU____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation___cf163a106d11461497cf5b799872fa06___QsRef3__ApplyBlockEncodingByLCU____ctladj__wrapper] +@MemoryManagement__38__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__38__RefCount, void (%Tuple*, i32)* @MemoryManagement__38__AliasCount] +@PartialApplication__67__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__67__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__67__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__67__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__67__ctladj__wrapper] +@Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____ctladj__wrapper] +@MemoryManagement__39__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__39__RefCount, void (%Tuple*, i32)* @MemoryManagement__39__AliasCount] +@PartialApplication__68__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__68__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__68__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__68__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__68__ctladj__wrapper] +@PartialApplication__69__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__69__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__69__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__69__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__69__ctladj__wrapper] +@Microsoft__Quantum__Preparation____QsRef3__ApplyGlobalRotationStep____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef3__ApplyGlobalRotationStep____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef3__ApplyGlobalRotationStep____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef3__ApplyGlobalRotationStep____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef3__ApplyGlobalRotationStep____ctladj__wrapper] +@MemoryManagement__40__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__40__RefCount, void (%Tuple*, i32)* @MemoryManagement__40__AliasCount] +@PartialApplication__70__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__70__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__70__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__70__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__70__ctladj__wrapper] +@Microsoft__Quantum__Preparation____QsRef3__ApplyToLittleEndian____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef3__ApplyToLittleEndian____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef3__ApplyToLittleEndian____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef3__ApplyToLittleEndian____ctl__wrapper, void (%Tuple*, %Tuple*, 
%Tuple*)* @Microsoft__Quantum__Preparation____QsRef3__ApplyToLittleEndian____ctladj__wrapper] +@MemoryManagement__41__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__41__RefCount, void (%Tuple*, i32)* @MemoryManagement__41__AliasCount] +@PartialApplication__71__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__71__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Math__ComplexPolar__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Math__ComplexPolar__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__42__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__42__RefCount, void (%Tuple*, i32)* @MemoryManagement__42__AliasCount] +@Microsoft__Quantum__Math__AbsD__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Math__AbsD__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__72__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__72__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__73__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__73__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__74__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__74__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__75__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__75__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__75__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__75__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__75__ctladj__wrapper] +@Microsoft__Quantum__Canon___ae485b361e91482f9489aa577506f9b4___QsRef3__ApplyOperationRepeatedlyCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___ae485b361e91482f9489aa577506f9b4___QsRef3__ApplyOperationRepeatedlyCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___ae485b361e91482f9489aa577506f9b4___QsRef3__ApplyOperationRepeatedlyCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___ae485b361e91482f9489aa577506f9b4___QsRef3__ApplyOperationRepeatedlyCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___ae485b361e91482f9489aa577506f9b4___QsRef3__ApplyOperationRepeatedlyCA____ctladj__wrapper] 
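+; The FunctionTable constants above dispatch the body/adj/ctl/ctladj specializations of each callable (null where a specialization was not generated), and each MemoryManagement__N table pairs the matching RefCount/AliasCount updaters for a captured tuple.
+; The entry point defined below estimates the ground-state energy of molecular hydrogen by qubitization: it builds the qubitization oracle and trial-state preparation from the Jordan-Wigner-encoded %qSharpData, runs RobustPhaseEstimation at %nBitsPrecision bits, and returns { estPhase, estEnergy } with estEnergy = sin(estPhase) * oneNorm + energyOffset.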
+@MemoryManagement__43__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__43__RefCount, void (%Tuple*, i32)* @MemoryManagement__43__AliasCount] + +define { double, double }* @Microsoft__Quantum__Chemistry__Samples__Hydrogen__GetEnergyByQubitization__body({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i64 %nBitsPrecision) { +entry: + %0 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i32 0, i32 1 + %fermionTermData = load { %Array*, %Array*, %Array*, %Array* }*, { %Array*, %Array*, %Array*, %Array* }** %0, align 8 + %1 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %2) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %5) + %8 = bitcast i8* %7 to { %Array*, %Array* }** + %9 = load { %Array*, %Array* }*, { %Array*, %Array* }** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array*, %Array* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call i64 @__quantum__rt__array_get_size_1d(%Array* %17) + %19 = sub i64 %18, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %20) + %23 = bitcast i8* %22 to { %Array*, %Array* }** + %24 = load { %Array*, %Array* }*, { %Array*, %Array* }** %23, align 8 + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { %Array*, %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %20, 1 + br label %header__2 + +exit__2: ; 
preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %31 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 2 + %32 = load %Array*, %Array** %31, align 8 + %33 = call i64 @__quantum__rt__array_get_size_1d(%Array* %32) + %34 = sub i64 %33, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %45, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %35) + %38 = bitcast i8* %37 to { %Array*, %Array* }** + %39 = load { %Array*, %Array* }*, { %Array*, %Array* }** %38, align 8 + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 0 + %41 = load %Array*, %Array** %40, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1) + %42 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 1 + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + %44 = bitcast { %Array*, %Array* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %44, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %45 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %46 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 3 + %47 = load %Array*, %Array** %46, align 8 + %48 = call i64 @__quantum__rt__array_get_size_1d(%Array* %47) + %49 = sub i64 %48, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %50 = phi i64 [ 0, %exit__3 ], [ %60, %exiting__4 ] + %51 = icmp sle i64 %50, %49 + br i1 %51, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %50) + %53 = bitcast i8* %52 to { %Array*, %Array* }** + %54 = load { %Array*, %Array* }*, { %Array*, %Array* }** %53, align 8 + %55 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 0 + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + %57 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 1 + %58 = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 1) + %59 = bitcast { %Array*, %Array* }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %59, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %60 = add i64 %50, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + %61 = bitcast { %Array*, %Array*, %Array*, %Array* }* %fermionTermData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %62 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i32 0, i32 2 + %statePrepData = load { i64, %Array* }*, { i64, %Array* }** %62, align 8 + %63 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %statePrepData, i32 0, i32 1 + %64 = 
load %Array*, %Array** %63, align 8 + %65 = call i64 @__quantum__rt__array_get_size_1d(%Array* %64) + %66 = sub i64 %65, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %67 = phi i64 [ 0, %exit__4 ], [ %78, %exiting__5 ] + %68 = icmp sle i64 %67, %66 + br i1 %68, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %67) + %70 = bitcast i8* %69 to { { double, double }*, %Array* }** + %71 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %70, align 8 + %72 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %71, i32 0, i32 0 + %73 = load { double, double }*, { double, double }** %72, align 8 + %74 = bitcast { double, double }* %73 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %74, i32 1) + %75 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %71, i32 0, i32 1 + %76 = load %Array*, %Array** %75, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 1) + %77 = bitcast { { double, double }*, %Array* }* %71 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %77, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %78 = add i64 %67, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + %79 = bitcast { i64, %Array* }* %statePrepData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + %80 = bitcast { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 1) + %81 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i32 0, i32 0 + %nSpinOrbitals = load i64, i64* %81, align 4 + %82 = sub i64 %3, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %83 = phi i64 [ 0, %exit__5 ], [ %93, %exiting__6 ] + %84 = icmp sle i64 %83, %82 + br i1 %84, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %83) + %86 = bitcast i8* %85 to { %Array*, %Array* }** + %87 = load { %Array*, %Array* }*, { %Array*, %Array* }** %86, align 8 + %88 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %87, i32 0, i32 0 + %89 = load %Array*, %Array** %88, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %89, i32 1) + %90 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %87, i32 0, i32 1 + %91 = load %Array*, %Array** %90, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %91, i32 1) + %92 = bitcast { %Array*, %Array* }* %87 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %92, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %93 = add i64 %83, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %94 = sub i64 %18, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %95 = phi i64 [ 0, %exit__6 ], [ %105, %exiting__7 ] + %96 = icmp sle i64 %95, %94 + br i1 %96, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %97 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %95) + %98 = bitcast i8* %97 to { %Array*, %Array* }** + %99 = load { %Array*, %Array* }*, { %Array*, %Array* }** %98, align 8 + %100 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %99, i32 0, i32 0 + %101 = load %Array*, %Array** %100, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %101, i32 1) + %102 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %99, i32 0, i32 1 + %103 = load %Array*, %Array** %102, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %103, i32 1) + %104 = bitcast { %Array*, %Array* }* %99 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %104, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %105 = add i64 %95, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %106 = sub i64 %33, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %107 = phi i64 [ 0, %exit__7 ], [ %117, %exiting__8 ] + %108 = icmp sle i64 %107, %106 + br i1 %108, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %107) + %110 = bitcast i8* %109 to { %Array*, %Array* }** + %111 = load { %Array*, %Array* }*, { %Array*, %Array* }** %110, align 8 + %112 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %111, i32 0, i32 0 + %113 = load %Array*, %Array** %112, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %113, i32 1) + %114 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %111, i32 0, i32 1 + %115 = load %Array*, %Array** %114, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %115, i32 1) + %116 = bitcast { %Array*, %Array* }* %111 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %116, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %117 = add i64 %107, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %118 = sub i64 %48, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %119 = phi i64 [ 0, %exit__8 ], [ %129, %exiting__9 ] + %120 = icmp sle i64 %119, %118 + br i1 %120, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %119) + %122 = bitcast i8* %121 to { %Array*, %Array* }** + %123 = load { %Array*, %Array* }*, { %Array*, %Array* }** %122, align 8 + %124 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 0 + %125 = load %Array*, %Array** %124, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %125, i32 1) + %126 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 1 + %127 = load %Array*, %Array** %126, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %127, i32 1) + %128 = bitcast { %Array*, %Array* }* %123 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %128, i32 1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %129 = add i64 %119, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %130 = sub i64 %65, 1 + br label %header__10 + 
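+; These header/body/exiting/exit loops only walk the nested tuple and array data borrowed from %qSharpData to keep the QIR alias counts balanced (+1 here, -1 in the mirrored loops before the return); no quantum operations happen in them.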
+header__10: ; preds = %exiting__10, %exit__9 + %131 = phi i64 [ 0, %exit__9 ], [ %142, %exiting__10 ] + %132 = icmp sle i64 %131, %130 + br i1 %132, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %131) + %134 = bitcast i8* %133 to { { double, double }*, %Array* }** + %135 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %134, align 8 + %136 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %135, i32 0, i32 0 + %137 = load { double, double }*, { double, double }** %136, align 8 + %138 = bitcast { double, double }* %137 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %138, i32 1) + %139 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %135, i32 0, i32 1 + %140 = load %Array*, %Array** %139, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %140, i32 1) + %141 = bitcast { { double, double }*, %Array* }* %135 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %141, i32 1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %142 = add i64 %131, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + %143 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i32 0, i32 3 + %energyOffset = load double, double* %143, align 8 + %144 = call { i64, { double, %Callable* }* }* @Microsoft__Quantum__Chemistry__JordanWigner__QubitizationOracle__body({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData) + %145 = getelementptr inbounds { i64, { double, %Callable* }* }, { i64, { double, %Callable* }* }* %144, i32 0, i32 0 + %nQubits = load i64, i64* %145, align 4 + %146 = getelementptr inbounds { i64, { double, %Callable* }* }, { i64, { double, %Callable* }* }* %144, i32 0, i32 1 + %147 = load { double, %Callable* }*, { double, %Callable* }** %146, align 8 + %148 = getelementptr inbounds { double, %Callable* }, { double, %Callable* }* %147, i32 0, i32 0 + %oneNorm = load double, double* %148, align 8 + %149 = getelementptr inbounds { double, %Callable* }, { double, %Callable* }* %147, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %149, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %150 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %151 = sub i64 %65, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %152 = phi i64 [ 0, %exit__10 ], [ %163, %exiting__11 ] + %153 = icmp sle i64 %152, %151 + br i1 %153, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %154 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %152) + %155 = bitcast i8* %154 to { { double, double }*, %Array* }** + %156 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %155, align 8 + %157 = getelementptr inbounds { { double, double }*, %Array* }, { { 
double, double }*, %Array* }* %156, i32 0, i32 0 + %158 = load { double, double }*, { double, double }** %157, align 8 + %159 = bitcast { double, double }* %158 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %159, i32 1) + %160 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %156, i32 0, i32 1 + %161 = load %Array*, %Array** %160, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %161, i32 1) + %162 = bitcast { { double, double }*, %Array* }* %156 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %162, i32 1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %163 = add i64 %152, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_reference_count(%Array* %64, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %79, i32 1) + %164 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { i64, %Array* }* }* getelementptr ({ %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* null, i32 1) to i64)) + %165 = bitcast %Tuple* %164 to { %Callable*, { i64, %Array* }* }* + %166 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %165, i32 0, i32 0 + %167 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %165, i32 0, i32 1 + store %Callable* %150, %Callable** %166, align 8 + store { i64, %Array* }* %statePrepData, { i64, %Array* }** %167, align 8 + %statePrep = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %164) + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePrep, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePrep, i32 1) + %168 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Characterization__RobustPhaseEstimation__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %169 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64 }* getelementptr ({ %Callable*, i64 }, { %Callable*, i64 }* null, i32 1) to i64)) + %170 = bitcast %Tuple* %169 to { %Callable*, i64 }* + %171 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %170, i32 0, i32 0 + %172 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %170, i32 0, i32 1 + store %Callable* %168, %Callable** %171, align 8 + store i64 %nBitsPrecision, i64* %172, align 4 + %phaseEstAlgorithm = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %169) + call void @__quantum__rt__capture_update_alias_count(%Callable* %phaseEstAlgorithm, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %phaseEstAlgorithm, i32 1) + %estPhase = call double @Microsoft__Quantum__Simulation__EstimateEnergy__body(i64 %nQubits, %Callable* %statePrep, %Callable* %oracle, %Callable* %phaseEstAlgorithm) + %173 = call double @__quantum__qis__sin__body(double %estPhase) + %174 = fmul double %173, %oneNorm + %estEnergy = fadd double %174, %energyOffset + %175 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { 
double, double }* null, i32 1) to i64)) + %176 = bitcast %Tuple* %175 to { double, double }* + %177 = getelementptr inbounds { double, double }, { double, double }* %176, i32 0, i32 0 + %178 = getelementptr inbounds { double, double }, { double, double }* %176, i32 0, i32 1 + store double %estPhase, double* %177, align 8 + store double %estEnergy, double* %178, align 8 + %179 = sub i64 %3, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %180 = phi i64 [ 0, %exit__11 ], [ %190, %exiting__12 ] + %181 = icmp sle i64 %180, %179 + br i1 %181, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %182 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %180) + %183 = bitcast i8* %182 to { %Array*, %Array* }** + %184 = load { %Array*, %Array* }*, { %Array*, %Array* }** %183, align 8 + %185 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %184, i32 0, i32 0 + %186 = load %Array*, %Array** %185, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %186, i32 -1) + %187 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %184, i32 0, i32 1 + %188 = load %Array*, %Array** %187, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %188, i32 -1) + %189 = bitcast { %Array*, %Array* }* %184 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %189, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %190 = add i64 %180, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %191 = sub i64 %18, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %192 = phi i64 [ 0, %exit__12 ], [ %202, %exiting__13 ] + %193 = icmp sle i64 %192, %191 + br i1 %193, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %194 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %192) + %195 = bitcast i8* %194 to { %Array*, %Array* }** + %196 = load { %Array*, %Array* }*, { %Array*, %Array* }** %195, align 8 + %197 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %196, i32 0, i32 0 + %198 = load %Array*, %Array** %197, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %198, i32 -1) + %199 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %196, i32 0, i32 1 + %200 = load %Array*, %Array** %199, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %200, i32 -1) + %201 = bitcast { %Array*, %Array* }* %196 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %201, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %202 = add i64 %192, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %203 = sub i64 %33, 1 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %204 = phi i64 [ 0, %exit__13 ], [ %214, %exiting__14 ] + %205 = icmp sle i64 %204, %203 + br i1 %205, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %206 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %204) + %207 = bitcast i8* %206 to { %Array*, %Array* }** + %208 = load { %Array*, %Array* }*, { %Array*, %Array* }** %207, align 8 + %209 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %208, i32 0, i32 0 + %210 = load %Array*, %Array** %209, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %210, i32 -1) + %211 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %208, i32 0, i32 1 + %212 = load %Array*, %Array** %211, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %212, i32 -1) + %213 = bitcast { %Array*, %Array* }* %208 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %213, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %214 = add i64 %204, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %215 = sub i64 %48, 1 + br label %header__15 + +header__15: ; preds = %exiting__15, %exit__14 + %216 = phi i64 [ 0, %exit__14 ], [ %226, %exiting__15 ] + %217 = icmp sle i64 %216, %215 + br i1 %217, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %218 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %216) + %219 = bitcast i8* %218 to { %Array*, %Array* }** + %220 = load { %Array*, %Array* }*, { %Array*, %Array* }** %219, align 8 + %221 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %220, i32 0, i32 0 + %222 = load %Array*, %Array** %221, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %222, i32 -1) + %223 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %220, i32 0, i32 1 + %224 = load %Array*, %Array** %223, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %224, i32 -1) + %225 = bitcast { %Array*, %Array* }* %220 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %225, i32 -1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %226 = add i64 %216, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %227 = sub i64 %65, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %228 = phi i64 [ 0, %exit__15 ], [ %239, %exiting__16 ] + %229 = icmp sle i64 %228, %227 + br i1 %229, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %230 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %228) + %231 = bitcast i8* %230 to { { double, double }*, %Array* }** + %232 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %231, align 8 + %233 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %232, i32 0, i32 0 + %234 = load { double, double }*, { double, double }** %233, align 8 + %235 = bitcast { double, double }* %234 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %235, i32 -1) + %236 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %232, i32 0, i32 1 + %237 = load %Array*, %Array** %236, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %237, i32 -1) + %238 = bitcast { { double, double }*, %Array* }* %232 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %238, i32 -1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %239 = add i64 %228, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 -1) + %240 = sub i64 %3, 
1 + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %241 = phi i64 [ 0, %exit__16 ], [ %251, %exiting__17 ] + %242 = icmp sle i64 %241, %240 + br i1 %242, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %243 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %241) + %244 = bitcast i8* %243 to { %Array*, %Array* }** + %245 = load { %Array*, %Array* }*, { %Array*, %Array* }** %244, align 8 + %246 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %245, i32 0, i32 0 + %247 = load %Array*, %Array** %246, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %247, i32 -1) + %248 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %245, i32 0, i32 1 + %249 = load %Array*, %Array** %248, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %249, i32 -1) + %250 = bitcast { %Array*, %Array* }* %245 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %250, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %251 = add i64 %241, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %252 = sub i64 %18, 1 + br label %header__18 + +header__18: ; preds = %exiting__18, %exit__17 + %253 = phi i64 [ 0, %exit__17 ], [ %263, %exiting__18 ] + %254 = icmp sle i64 %253, %252 + br i1 %254, label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %255 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %253) + %256 = bitcast i8* %255 to { %Array*, %Array* }** + %257 = load { %Array*, %Array* }*, { %Array*, %Array* }** %256, align 8 + %258 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %257, i32 0, i32 0 + %259 = load %Array*, %Array** %258, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %259, i32 -1) + %260 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %257, i32 0, i32 1 + %261 = load %Array*, %Array** %260, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %261, i32 -1) + %262 = bitcast { %Array*, %Array* }* %257 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %262, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = %body__18 + %263 = add i64 %253, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %264 = sub i64 %33, 1 + br label %header__19 + +header__19: ; preds = %exiting__19, %exit__18 + %265 = phi i64 [ 0, %exit__18 ], [ %275, %exiting__19 ] + %266 = icmp sle i64 %265, %264 + br i1 %266, label %body__19, label %exit__19 + +body__19: ; preds = %header__19 + %267 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %265) + %268 = bitcast i8* %267 to { %Array*, %Array* }** + %269 = load { %Array*, %Array* }*, { %Array*, %Array* }** %268, align 8 + %270 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %269, i32 0, i32 0 + %271 = load %Array*, %Array** %270, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %271, i32 -1) + %272 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %269, i32 0, i32 1 + %273 = load %Array*, %Array** %272, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %273, i32 -1) + %274 = bitcast { %Array*, %Array* }* %269 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %274, i32 -1) + br label %exiting__19 + 
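+; From here through exit__21 the function releases the alias counts taken on entry, then drops its temporary callables before returning the { estPhase, estEnergy } tuple.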
+exiting__19: ; preds = %body__19 + %275 = add i64 %265, 1 + br label %header__19 + +exit__19: ; preds = %header__19 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %276 = sub i64 %48, 1 + br label %header__20 + +header__20: ; preds = %exiting__20, %exit__19 + %277 = phi i64 [ 0, %exit__19 ], [ %287, %exiting__20 ] + %278 = icmp sle i64 %277, %276 + br i1 %278, label %body__20, label %exit__20 + +body__20: ; preds = %header__20 + %279 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %277) + %280 = bitcast i8* %279 to { %Array*, %Array* }** + %281 = load { %Array*, %Array* }*, { %Array*, %Array* }** %280, align 8 + %282 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %281, i32 0, i32 0 + %283 = load %Array*, %Array** %282, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %283, i32 -1) + %284 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %281, i32 0, i32 1 + %285 = load %Array*, %Array** %284, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %285, i32 -1) + %286 = bitcast { %Array*, %Array* }* %281 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %286, i32 -1) + br label %exiting__20 + +exiting__20: ; preds = %body__20 + %287 = add i64 %277, 1 + br label %header__20 + +exit__20: ; preds = %header__20 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %288 = sub i64 %65, 1 + br label %header__21 + +header__21: ; preds = %exiting__21, %exit__20 + %289 = phi i64 [ 0, %exit__20 ], [ %300, %exiting__21 ] + %290 = icmp sle i64 %289, %288 + br i1 %290, label %body__21, label %exit__21 + +body__21: ; preds = %header__21 + %291 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %289) + %292 = bitcast i8* %291 to { { double, double }*, %Array* }** + %293 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %292, align 8 + %294 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %293, i32 0, i32 0 + %295 = load { double, double }*, { double, double }** %294, align 8 + %296 = bitcast { double, double }* %295 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %296, i32 -1) + %297 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %293, i32 0, i32 1 + %298 = load %Array*, %Array** %297, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %298, i32 -1) + %299 = bitcast { { double, double }*, %Array* }* %293 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %299, i32 -1) + br label %exiting__21 + +exiting__21: ; preds = %body__21 + %300 = add i64 %289, 1 + br label %header__21 + +exit__21: ; preds = %header__21 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePrep, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePrep, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %phaseEstAlgorithm, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %phaseEstAlgorithm, i32 -1) + 
call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 -1) + %301 = bitcast { double, %Callable* }* %147 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %301, i32 -1) + %302 = bitcast { i64, { double, %Callable* }* }* %144 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %302, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %statePrep, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %statePrep, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %phaseEstAlgorithm, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %phaseEstAlgorithm, i32 -1) + ret { double, double }* %176 +} + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) + +declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) + +define internal { i64, { double, %Callable* }* }* @Microsoft__Quantum__Chemistry__JordanWigner__QubitizationOracle__body({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData) { +entry: + %0 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i32 0, i32 1 + %1 = load { %Array*, %Array*, %Array*, %Array* }*, { %Array*, %Array*, %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %1, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + %4 = call i64 @__quantum__rt__array_get_size_1d(%Array* %3) + %5 = sub i64 %4, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %6 = phi i64 [ 0, %entry ], [ %16, %exiting__1 ] + %7 = icmp sle i64 %6, %5 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 %6) + %9 = bitcast i8* %8 to { %Array*, %Array* }** + %10 = load { %Array*, %Array* }*, { %Array*, %Array* }** %9, align 8 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %10, i32 0, i32 0 + %12 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %10, i32 0, i32 1 + %14 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + %15 = bitcast { %Array*, %Array* }* %10 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %16 = add i64 %6, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %17 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %1, i32 0, i32 1 + %18 = load %Array*, %Array** %17, align 8 + %19 = call i64 @__quantum__rt__array_get_size_1d(%Array* %18) + %20 = sub i64 %19, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, %exit__1 ], [ %31, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + 
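+; QubitizationOracle__body (opened above) delegates to ____QsRef0___QubitizationOracleSeperatedRegisters____body, then uses ____QsRef0___MergeTwoRegisters____ to partially apply the returned oracle onto a single register of nCtrlRegisterQubits + nTargetRegisterQubits qubits, returning { nQubits, { oneNorm, oracle } }.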
+body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %21) + %24 = bitcast i8* %23 to { %Array*, %Array* }** + %25 = load { %Array*, %Array* }*, { %Array*, %Array* }** %24, align 8 + %26 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %25, i32 0, i32 0 + %27 = load %Array*, %Array** %26, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 1) + %28 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %25, i32 0, i32 1 + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 1) + %30 = bitcast { %Array*, %Array* }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %30, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %31 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + %32 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %1, i32 0, i32 2 + %33 = load %Array*, %Array** %32, align 8 + %34 = call i64 @__quantum__rt__array_get_size_1d(%Array* %33) + %35 = sub i64 %34, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %36 = phi i64 [ 0, %exit__2 ], [ %46, %exiting__3 ] + %37 = icmp sle i64 %36, %35 + br i1 %37, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %36) + %39 = bitcast i8* %38 to { %Array*, %Array* }** + %40 = load { %Array*, %Array* }*, { %Array*, %Array* }** %39, align 8 + %41 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %40, i32 0, i32 0 + %42 = load %Array*, %Array** %41, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 1) + %43 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %40, i32 0, i32 1 + %44 = load %Array*, %Array** %43, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 1) + %45 = bitcast { %Array*, %Array* }* %40 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %46 = add i64 %36, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + %47 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %1, i32 0, i32 3 + %48 = load %Array*, %Array** %47, align 8 + %49 = call i64 @__quantum__rt__array_get_size_1d(%Array* %48) + %50 = sub i64 %49, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %51 = phi i64 [ 0, %exit__3 ], [ %61, %exiting__4 ] + %52 = icmp sle i64 %51, %50 + br i1 %52, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %48, i64 %51) + %54 = bitcast i8* %53 to { %Array*, %Array* }** + %55 = load { %Array*, %Array* }*, { %Array*, %Array* }** %54, align 8 + %56 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %55, i32 0, i32 0 + %57 = load %Array*, %Array** %56, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %57, i32 1) + %58 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %55, i32 0, i32 1 + %59 = load %Array*, %Array** %58, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %59, i32 1) + %60 = 
bitcast { %Array*, %Array* }* %55 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %60, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %61 = add i64 %51, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %48, i32 1) + %62 = bitcast { %Array*, %Array*, %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %62, i32 1) + %63 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i32 0, i32 2 + %64 = load { i64, %Array* }*, { i64, %Array* }** %63, align 8 + %65 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %64, i32 0, i32 1 + %66 = load %Array*, %Array** %65, align 8 + %67 = call i64 @__quantum__rt__array_get_size_1d(%Array* %66) + %68 = sub i64 %67, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %69 = phi i64 [ 0, %exit__4 ], [ %80, %exiting__5 ] + %70 = icmp sle i64 %69, %68 + br i1 %70, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %71 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %66, i64 %69) + %72 = bitcast i8* %71 to { { double, double }*, %Array* }** + %73 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %72, align 8 + %74 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %73, i32 0, i32 0 + %75 = load { double, double }*, { double, double }** %74, align 8 + %76 = bitcast { double, double }* %75 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %76, i32 1) + %77 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %73, i32 0, i32 1 + %78 = load %Array*, %Array** %77, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %78, i32 1) + %79 = bitcast { { double, double }*, %Array* }* %73 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %80 = add i64 %69, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 1) + %81 = bitcast { i64, %Array* }* %64 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %81, i32 1) + %82 = bitcast { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %82, i32 1) + %83 = call { { i64, i64 }*, { double, %Callable* }* }* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___QubitizationOracleSeperatedRegisters____body({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData) + %84 = getelementptr inbounds { { i64, i64 }*, { double, %Callable* }* }, { { i64, i64 }*, { double, %Callable* }* }* %83, i32 0, i32 0 + %85 = load { i64, i64 }*, { i64, i64 }** %84, align 8 + %86 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %85, i32 0, i32 0 + %nCtrlRegisterQubits = load i64, i64* %86, align 4 + %87 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %85, i32 0, i32 1 + %nTargetRegisterQubits = load i64, i64* %87, align 4 + %88 = getelementptr inbounds { { i64, i64 }*, { double, %Callable* }* }, { { i64, i64 }*, { double, %Callable* }* }* %83, i32 0, i32 1 + %89 = load { double, %Callable* }*, { double, %Callable* }** %88, align 8 + %90 = 
getelementptr inbounds { double, %Callable* }, { double, %Callable* }* %89, i32 0, i32 0 + %oneNorm = load double, double* %90, align 8 + %91 = getelementptr inbounds { double, %Callable* }, { double, %Callable* }* %89, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %91, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %nQubits = add i64 %nCtrlRegisterQubits, %nTargetRegisterQubits + %92 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___MergeTwoRegisters_____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 1) + %93 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, i64 }* getelementptr ({ %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* null, i32 1) to i64)) + %94 = bitcast %Tuple* %93 to { %Callable*, %Callable*, i64 }* + %95 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %94, i32 0, i32 0 + %96 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %94, i32 0, i32 1 + %97 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %94, i32 0, i32 2 + store %Callable* %92, %Callable** %95, align 8 + store %Callable* %oracle, %Callable** %96, align 8 + store i64 %nTargetRegisterQubits, i64* %97, align 4 + %98 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__11__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__8__FunctionTable, %Tuple* %93) + %99 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Callable* }* getelementptr ({ double, %Callable* }, { double, %Callable* }* null, i32 1) to i64)) + %100 = bitcast %Tuple* %99 to { double, %Callable* }* + %101 = getelementptr inbounds { double, %Callable* }, { double, %Callable* }* %100, i32 0, i32 0 + %102 = getelementptr inbounds { double, %Callable* }, { double, %Callable* }* %100, i32 0, i32 1 + store double %oneNorm, double* %101, align 8 + store %Callable* %98, %Callable** %102, align 8 + %103 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { double, %Callable* }* }* getelementptr ({ i64, { double, %Callable* }* }, { i64, { double, %Callable* }* }* null, i32 1) to i64)) + %104 = bitcast %Tuple* %103 to { i64, { double, %Callable* }* }* + %105 = getelementptr inbounds { i64, { double, %Callable* }* }, { i64, { double, %Callable* }* }* %104, i32 0, i32 0 + %106 = getelementptr inbounds { i64, { double, %Callable* }* }, { i64, { double, %Callable* }* }* %104, i32 0, i32 1 + store i64 %nQubits, i64* %105, align 4 + store { double, %Callable* }* %100, { double, %Callable* }** %106, align 8 + %107 = sub i64 %4, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %108 = phi i64 [ 0, %exit__5 ], [ %118, %exiting__6 ] + %109 = icmp sle i64 %108, %107 + br i1 %109, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %110 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 %108) + %111 = bitcast i8* %110 to { %Array*, %Array* }** + %112 = load { %Array*, %Array* }*, { %Array*, %Array* }** %111, align 8 + %113 = getelementptr 
inbounds { %Array*, %Array* }, { %Array*, %Array* }* %112, i32 0, i32 0 + %114 = load %Array*, %Array** %113, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %114, i32 -1) + %115 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %112, i32 0, i32 1 + %116 = load %Array*, %Array** %115, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %116, i32 -1) + %117 = bitcast { %Array*, %Array* }* %112 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %117, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %118 = add i64 %108, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + %119 = sub i64 %19, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %120 = phi i64 [ 0, %exit__6 ], [ %130, %exiting__7 ] + %121 = icmp sle i64 %120, %119 + br i1 %121, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %122 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %120) + %123 = bitcast i8* %122 to { %Array*, %Array* }** + %124 = load { %Array*, %Array* }*, { %Array*, %Array* }** %123, align 8 + %125 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %124, i32 0, i32 0 + %126 = load %Array*, %Array** %125, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %126, i32 -1) + %127 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %124, i32 0, i32 1 + %128 = load %Array*, %Array** %127, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %128, i32 -1) + %129 = bitcast { %Array*, %Array* }* %124 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %129, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %130 = add i64 %120, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 -1) + %131 = sub i64 %34, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %132 = phi i64 [ 0, %exit__7 ], [ %142, %exiting__8 ] + %133 = icmp sle i64 %132, %131 + br i1 %133, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %134 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %132) + %135 = bitcast i8* %134 to { %Array*, %Array* }** + %136 = load { %Array*, %Array* }*, { %Array*, %Array* }** %135, align 8 + %137 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %136, i32 0, i32 0 + %138 = load %Array*, %Array** %137, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %138, i32 -1) + %139 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %136, i32 0, i32 1 + %140 = load %Array*, %Array** %139, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %140, i32 -1) + %141 = bitcast { %Array*, %Array* }* %136 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %141, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %142 = add i64 %132, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 -1) + %143 = sub i64 %49, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %144 = phi i64 [ 0, %exit__8 ], [ %154, %exiting__9 ] + %145 = icmp sle i64 %144, %143 + br i1 %145, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%48, i64 %144) + %147 = bitcast i8* %146 to { %Array*, %Array* }** + %148 = load { %Array*, %Array* }*, { %Array*, %Array* }** %147, align 8 + %149 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %148, i32 0, i32 0 + %150 = load %Array*, %Array** %149, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %150, i32 -1) + %151 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %148, i32 0, i32 1 + %152 = load %Array*, %Array** %151, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %152, i32 -1) + %153 = bitcast { %Array*, %Array* }* %148 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %153, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %154 = add i64 %144, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %48, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %62, i32 -1) + %155 = sub i64 %67, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %156 = phi i64 [ 0, %exit__9 ], [ %167, %exiting__10 ] + %157 = icmp sle i64 %156, %155 + br i1 %157, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %158 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %66, i64 %156) + %159 = bitcast i8* %158 to { { double, double }*, %Array* }** + %160 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %159, align 8 + %161 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %160, i32 0, i32 0 + %162 = load { double, double }*, { double, double }** %161, align 8 + %163 = bitcast { double, double }* %162 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %163, i32 -1) + %164 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %160, i32 0, i32 1 + %165 = load %Array*, %Array** %164, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %165, i32 -1) + %166 = bitcast { { double, double }*, %Array* }* %160 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %166, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %167 = add i64 %156, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %81, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %82, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + %168 = bitcast { i64, i64 }* %85 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %168, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 -1) + %169 = bitcast { double, %Callable* }* %89 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %169, i32 -1) + %170 = bitcast { { i64, i64 }*, { double, %Callable* }* }* %83 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %170, i32 -1) + ret { i64, { double, %Callable* }* }* %104 +} + +declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) + +define internal void 
@Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Array* }*, { i64, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Array* }*, %Array* }* getelementptr ({ { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { i64, %Array* }*, %Array* }* + %8 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %7, i32 0, i32 1 + store { i64, %Array* }* %2, { i64, %Array* }** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = load { i64, %Array* }*, { i64, %Array* }** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body({ i64, %Array* }* %3, %Array* %4) + ret void +} + +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) + +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) + +declare %Tuple* @__quantum__rt__tuple_create(i64) + +define internal void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Array* }*, { i64, %Array* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %4, i32 0, i32 1 + %6 = load %Array*, %Array** %5, align 8 + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %9 = phi i64 [ 0, %entry ], [ %20, 
%exiting__1 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %9) + %12 = bitcast i8* %11 to { { double, double }*, %Array* }** + %13 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %12, align 8 + %14 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %13, i32 0, i32 0 + %15 = load { double, double }*, { double, double }** %14, align 8 + %16 = bitcast { double, double }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 %count-change) + %17 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %13, i32 0, i32 1 + %18 = load %Array*, %Array** %17, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 %count-change) + %19 = bitcast { { double, double }*, %Array* }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %9, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 %count-change) + %21 = bitcast { i64, %Array* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Array* }*, { i64, %Array* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %4, i32 0, i32 1 + %6 = load %Array*, %Array** %5, align 8 + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %9 = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %9) + %12 = bitcast i8* %11 to { { double, double }*, %Array* }** + %13 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %12, align 8 + %14 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %13, i32 0, i32 0 + %15 = load { double, double }*, { double, double }** %14, align 8 + %16 = bitcast { double, double }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 %count-change) + %17 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %13, i32 0, i32 1 + %18 = load %Array*, %Array** %17, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 %count-change) + %19 = bitcast { { double, double }*, %Array* }* %13 
to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %19, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %9, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 %count-change) + %21 = bitcast { i64, %Array* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = bitcast %Tuple* %arg-tuple to { { %Callable* }*, %Array* }* + %4 = getelementptr inbounds { { %Callable* }*, %Array* }, { { %Callable* }*, %Array* }* %3, i32 0, i32 0 + %5 = load { %Callable* }*, { %Callable* }** %4, align 8 + %6 = getelementptr inbounds { { %Callable* }*, %Array* }, { { %Callable* }*, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { %Callable* }*, %Array* }* getelementptr ({ i64, { %Callable* }*, %Array* }, { i64, { %Callable* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { i64, { %Callable* }*, %Array* }* + %10 = getelementptr inbounds { i64, { %Callable* }*, %Array* }, { i64, { %Callable* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { i64, { %Callable* }*, %Array* }, { i64, { %Callable* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { i64, { %Callable* }*, %Array* }, { i64, { %Callable* }*, %Array* }* %9, i32 0, i32 2 + store i64 %2, i64* %10, align 4 + store { %Callable* }* %5, { %Callable* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Characterization__RobustPhaseEstimation__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, { %Callable* }*, %Array* }* + %1 = getelementptr inbounds { i64, { %Callable* }*, %Array* }, { i64, { %Callable* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, { %Callable* }*, %Array* }, { i64, { %Callable* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, { %Callable* }*, %Array* }, { i64, { %Callable* }*, %Array* }* %0, i32 0, i32 2 + %4 = load i64, i64* %1, align 4 + %5 = load { %Callable* }*, { %Callable* }** %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + %7 = call double @Microsoft__Quantum__Characterization__RobustPhaseEstimation__body(i64 %4, { %Callable* }* %5, %Array* %6) + %8 = bitcast %Tuple* %result-tuple to { double }* + %9 = getelementptr inbounds { double }, { double }* %8, i32 0, i32 0 + store double %7, double* %9, align 8 + ret void +} + +define internal void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, 
i64 }* + %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal double @Microsoft__Quantum__Simulation__EstimateEnergy__body(i64 %nQubits, %Callable* %statePrepUnitary, %Callable* %qpeUnitary, %Callable* %phaseEstAlgorithm) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePrepUnitary, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePrepUnitary, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %qpeUnitary, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qpeUnitary, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %phaseEstAlgorithm, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %phaseEstAlgorithm, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %phaseEst = call double @Microsoft__Quantum__Simulation__EstimateEnergyWithAdiabaticEvolution__body(i64 %nQubits, %Callable* %statePrepUnitary, %Callable* %0, %Callable* %qpeUnitary, %Callable* %phaseEstAlgorithm) + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePrepUnitary, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePrepUnitary, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %qpeUnitary, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qpeUnitary, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %phaseEstAlgorithm, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %phaseEstAlgorithm, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + ret double %phaseEst +} + +declare double @__quantum__qis__sin__body(double) + +declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body({ i64, %Array* }* %stateData, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %stateData, i32 0, i32 1 + %terms = load %Array*, %Array** %0, align 8 + %nTerms = call i64 @__quantum__rt__array_get_size_1d(%Array* %terms) + %1 = sub i64 
%nTerms, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %2) + %5 = bitcast i8* %4 to { { double, double }*, %Array* }** + %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0 + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %terms, i32 1) + %14 = bitcast { i64, %Array* }* %stateData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %15 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %stateData, i32 0, i32 0 + %stateType = load i64, i64* %15, align 4 + %16 = sub i64 %nTerms, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %17 = phi i64 [ 0, %exit__1 ], [ %28, %exiting__2 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %17) + %20 = bitcast i8* %19 to { { double, double }*, %Array* }** + %21 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %20, align 8 + %22 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %21, i32 0, i32 0 + %23 = load { double, double }*, { double, double }** %22, align 8 + %24 = bitcast { double, double }* %23 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 1) + %25 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %21, i32 0, i32 1 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %27 = bitcast { { double, double }*, %Array* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %28 = add i64 %17, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %terms, i32 1) + %29 = icmp eq i64 %stateType, 2 + br i1 %29, label %then0__1, label %test1__1 + +then0__1: ; preds = %exit__2 + %30 = call i1 @Microsoft__Quantum__Arrays___0fcd31919d144fe58f058d4e79e5219d_IsEmpty__body(%Array* %terms) + br i1 %30, label %then0__2, label %test1__2 + +then0__2: ; preds = %then0__1 + br label %continue__2 + +test1__2: ; preds = %then0__1 + %31 = icmp eq i64 %nTerms, 1 + br i1 %31, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__2 + %32 = 
call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 0) + %33 = bitcast i8* %32 to { { double, double }*, %Array* }** + %34 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %33, align 8 + %35 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %34, i32 0, i32 0 + %coefficient = load { double, double }*, { double, double }** %35, align 8 + %36 = bitcast { double, double }* %coefficient to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %36, i32 1) + %37 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %34, i32 0, i32 1 + %qubitIndices = load %Array*, %Array** %37, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body(%Array* %qubitIndices, %Array* %qubits) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %36, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + br label %continue__2 + +else__1: ; preds = %test1__2 + %38 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSparseMultiConfigurationalState__body(%Callable* %38, %Array* %terms, %Array* %qubits) + call void @__quantum__rt__capture_update_reference_count(%Callable* %38, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %38, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %then1__1, %then0__2 + br label %continue__1 + +test1__1: ; preds = %exit__2 + %39 = icmp eq i64 %stateType, 3 + br i1 %39, label %then1__2, label %continue__1 + +then1__2: ; preds = %test1__1 + %40 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %41 = sub i64 %nTerms, 1 + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %41) + %43 = bitcast i8* %42 to { { double, double }*, %Array* }** + %44 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %43, align 8 + %45 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %44, i32 0, i32 0 + %46 = load { double, double }*, { double, double }** %45, align 8 + %47 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %44, i32 0, i32 1 + %48 = load %Array*, %Array** %47, align 8 + %49 = bitcast { double, double }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %49, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %48, i32 1) + %50 = bitcast { { double, double }*, %Array* }* %44 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %50, i32 1) + %51 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 0) + %53 = bitcast i8* %52 to { { double, double }*, %Array* }** + store { { double, double }*, %Array* }* %44, { { double, double }*, %Array* }** %53, align 8 + %54 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, 
%Array* }* getelementptr ({ %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* null, i32 1) to i64)) + %55 = bitcast %Tuple* %54 to { %Callable*, i64, %Array* }* + %56 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %55, i32 0, i32 0 + %57 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %55, i32 0, i32 1 + %58 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %55, i32 0, i32 2 + store %Callable* %40, %Callable** %56, align 8 + store i64 2, i64* %57, align 4 + store %Array* %51, %Array** %58, align 8 + %referenceState = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__9__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__6__FunctionTable, %Tuple* %54) + call void @__quantum__rt__capture_update_alias_count(%Callable* %referenceState, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %referenceState, i32 1) + %59 = sub i64 %nTerms, 2 + %60 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %59, 2 + %61 = call %Array* @__quantum__rt__array_slice_1d(%Array* %terms, %Range %60, i1 true) + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareUnitaryCoupledClusterState__body(%Callable* %referenceState, %Array* %61, double 1.000000e+00, %Array* %qubits) + call void @__quantum__rt__capture_update_alias_count(%Callable* %referenceState, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %referenceState, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %referenceState, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %referenceState, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %61, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then1__2, %test1__1, %continue__2 + %62 = sub i64 %nTerms, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %continue__1 + %63 = phi i64 [ 0, %continue__1 ], [ %74, %exiting__3 ] + %64 = icmp sle i64 %63, %62 + br i1 %64, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %63) + %66 = bitcast i8* %65 to { { double, double }*, %Array* }** + %67 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %66, align 8 + %68 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %67, i32 0, i32 0 + %69 = load { double, double }*, { double, double }** %68, align 8 + %70 = bitcast { double, double }* %69 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %70, i32 -1) + %71 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %67, i32 0, i32 1 + %72 = load %Array*, %Array** %71, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 -1) + %73 = bitcast { { double, double }*, %Array* }* %67 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %73, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %74 = add i64 %63, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %terms, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %75 = sub i64 %nTerms, 1 + br label %header__4 + +header__4: ; preds = 
%exiting__4, %exit__3 + %76 = phi i64 [ 0, %exit__3 ], [ %87, %exiting__4 ] + %77 = icmp sle i64 %76, %75 + br i1 %77, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %76) + %79 = bitcast i8* %78 to { { double, double }*, %Array* }** + %80 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %79, align 8 + %81 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %80, i32 0, i32 0 + %82 = load { double, double }*, { double, double }** %81, align 8 + %83 = bitcast { double, double }* %82 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %83, i32 -1) + %84 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %80, i32 0, i32 1 + %85 = load %Array*, %Array** %84, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %85, i32 -1) + %86 = bitcast { { double, double }*, %Array* }* %80 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %87 = add i64 %76, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %terms, i32 -1) + ret void +} + +define internal double @Microsoft__Quantum__Characterization__RobustPhaseEstimation__body(i64 %bitsPrecision, { %Callable* }* %oracle, %Array* %targetState) { +entry: + %pPlus = alloca double, align 8 + %pZero = alloca double, align 8 + %nRepeats = alloca i64, align 8 + %thetaEst = alloca double, align 8 + %0 = getelementptr inbounds { %Callable* }, { %Callable* }* %oracle, i32 0, i32 0 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { %Callable* }* %oracle to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetState, i32 1) + store double 0.000000e+00, double* %thetaEst, align 8 + %controlQubit = call %Qubit* @__quantum__rt__qubit_allocate() + %3 = sub i64 %bitsPrecision, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %exponent = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %4 = icmp sle i64 %exponent, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = trunc i64 %exponent to i32 + %6 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %5) + %power = fptosi double %6 to i64 + %7 = sub i64 %bitsPrecision, %exponent + %8 = sitofp i64 %7 to double + %9 = fmul double 2.500000e+00, %8 + %10 = fadd double %9, 5.000000e-01 + %11 = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %10) + store i64 %11, i64* %nRepeats, align 4 + %12 = srem i64 %11, 2 + %13 = icmp eq i64 %12, 1 + br i1 %13, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %14 = add i64 %11, 1 + store i64 %14, i64* %nRepeats, align 4 + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + store double 0.000000e+00, double* %pZero, align 8 + store double 0.000000e+00, double* %pPlus, align 8 + %15 = load i64, i64* %nRepeats, align 4 + %16 = sub i64 %15, 1 + br label %header__2 + +exiting__1: ; preds = %exit__2 + %17 = add i64 %exponent, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* 
%controlQubit) + %18 = load double, double* %thetaEst, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetState, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %controlQubit) + ret double %18 + +header__2: ; preds = %exiting__2, %continue__1 + %idxRep = phi i64 [ 0, %continue__1 ], [ %20, %exiting__2 ] + %19 = icmp sle i64 %idxRep, %16 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + br label %header__3 + +exiting__2: ; preds = %exit__3 + %20 = add i64 %idxRep, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %21 = load double, double* %pPlus, align 8 + %22 = sitofp i64 %15 to double + %23 = fdiv double %22, 2.000000e+00 + %y = fsub double %21, %23 + %24 = load double, double* %pZero, align 8 + %25 = sitofp i64 %15 to double + %26 = fdiv double %25, 2.000000e+00 + %x = fsub double %24, %26 + %deltaTheta = call double @__quantum__qis__arctan2__body(double %y, double %x) + %27 = load double, double* %thetaEst, align 8 + %28 = sitofp i64 %power to double + %29 = fmul double %27, %28 + %30 = fsub double %deltaTheta, %29 + %31 = call double @Microsoft__Quantum__Math__PI__body() + %32 = fmul double 2.000000e+00, %31 + %33 = call double @Microsoft__Quantum__Math__PI__body() + %34 = fneg double %33 + %delta = call double @Microsoft__Quantum__Math__RealMod__body(double %30, double %32, double %34) + %35 = sitofp i64 %power to double + %36 = fdiv double %delta, %35 + %37 = fadd double %27, %36 + store double %37, double* %thetaEst, align 8 + br label %exiting__1 + +header__3: ; preds = %exiting__3, %body__2 + %idxExperiment = phi i64 [ 0, %body__2 ], [ %52, %exiting__3 ] + %38 = icmp sle i64 %idxExperiment, 1 + br i1 %38, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %39 = call double @Microsoft__Quantum__Math__PI__body() + %40 = sitofp i64 %idxExperiment to double + %41 = fmul double %39, %40 + %42 = fdiv double %41, 2.000000e+00 + %43 = sitofp i64 %power to double + %rotation = fdiv double %42, %43 + call void @Microsoft__Quantum__Characterization__DiscretePhaseEstimationIteration__body({ %Callable* }* %oracle, i64 %power, double %rotation, %Array* %targetState, %Qubit* %controlQubit) + %result = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %controlQubit) + %44 = call %Result* @__quantum__rt__result_get_zero() + %45 = call i1 @__quantum__rt__result_equal(%Result* %result, %Result* %44) + br i1 %45, label %then0__2, label %continue__2 + +then0__2: ; preds = %body__3 + %46 = icmp eq i64 %idxExperiment, 0 + br i1 %46, label %then0__3, label %test1__1 + +then0__3: ; preds = %then0__2 + %47 = load double, double* %pZero, align 8 + %48 = fadd double %47, 1.000000e+00 + store double %48, double* %pZero, align 8 + br label %continue__3 + +test1__1: ; preds = %then0__2 + %49 = icmp eq i64 %idxExperiment, 1 + br i1 %49, label %then1__1, label %continue__3 + +then1__1: ; preds = %test1__1 + %50 = load double, double* %pPlus, align 8 + %51 = fadd double %50, 1.000000e+00 + store double %51, double* %pPlus, align 8 + br label %continue__3 + +continue__3: ; preds = %then1__1, %test1__1, %then0__3 + br label %continue__2 + +continue__2: ; preds = %continue__3, %body__3 + call void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %controlQubit) + call void 
@__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %continue__2 + %52 = add i64 %idxExperiment, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + br label %exiting__2 +} + +declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) + +define { double, double }* @Microsoft__Quantum__Chemistry__Samples__Hydrogen__GetEnergyByTrotterization__body({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i64 %nBitsPrecision, double %trotterStepSize, i64 %trotterOrder) { +entry: + %0 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i32 0, i32 1 + %fermionTermData = load { %Array*, %Array*, %Array*, %Array* }*, { %Array*, %Array*, %Array*, %Array* }** %0, align 8 + %1 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %2) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %5) + %8 = bitcast i8* %7 to { %Array*, %Array* }** + %9 = load { %Array*, %Array* }*, { %Array*, %Array* }** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array*, %Array* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call i64 @__quantum__rt__array_get_size_1d(%Array* %17) + %19 = sub i64 %18, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %20) + %23 = bitcast i8* %22 to { %Array*, %Array* }** + %24 = load { %Array*, %Array* }*, { %Array*, %Array* }** %23, align 8 + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { %Array*, %Array* }* %24 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %31 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 2 + %32 = load %Array*, %Array** %31, align 8 + %33 = call i64 @__quantum__rt__array_get_size_1d(%Array* %32) + %34 = sub i64 %33, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %45, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %35) + %38 = bitcast i8* %37 to { %Array*, %Array* }** + %39 = load { %Array*, %Array* }*, { %Array*, %Array* }** %38, align 8 + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 0 + %41 = load %Array*, %Array** %40, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1) + %42 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 1 + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + %44 = bitcast { %Array*, %Array* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %44, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %45 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %46 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 3 + %47 = load %Array*, %Array** %46, align 8 + %48 = call i64 @__quantum__rt__array_get_size_1d(%Array* %47) + %49 = sub i64 %48, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %50 = phi i64 [ 0, %exit__3 ], [ %60, %exiting__4 ] + %51 = icmp sle i64 %50, %49 + br i1 %51, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %50) + %53 = bitcast i8* %52 to { %Array*, %Array* }** + %54 = load { %Array*, %Array* }*, { %Array*, %Array* }** %53, align 8 + %55 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 0 + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + %57 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 1 + %58 = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 1) + %59 = bitcast { %Array*, %Array* }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %59, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %60 = add i64 %50, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + %61 = bitcast { %Array*, %Array*, %Array*, %Array* }* %fermionTermData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %62 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i32 0, i32 2 + 
%statePrepData = load { i64, %Array* }*, { i64, %Array* }** %62, align 8 + %63 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %statePrepData, i32 0, i32 1 + %64 = load %Array*, %Array** %63, align 8 + %65 = call i64 @__quantum__rt__array_get_size_1d(%Array* %64) + %66 = sub i64 %65, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %67 = phi i64 [ 0, %exit__4 ], [ %78, %exiting__5 ] + %68 = icmp sle i64 %67, %66 + br i1 %68, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %67) + %70 = bitcast i8* %69 to { { double, double }*, %Array* }** + %71 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %70, align 8 + %72 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %71, i32 0, i32 0 + %73 = load { double, double }*, { double, double }** %72, align 8 + %74 = bitcast { double, double }* %73 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %74, i32 1) + %75 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %71, i32 0, i32 1 + %76 = load %Array*, %Array** %75, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 1) + %77 = bitcast { { double, double }*, %Array* }* %71 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %77, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %78 = add i64 %67, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + %79 = bitcast { i64, %Array* }* %statePrepData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + %80 = bitcast { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 1) + %81 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i32 0, i32 0 + %nSpinOrbitals = load i64, i64* %81, align 4 + %82 = sub i64 %3, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %83 = phi i64 [ 0, %exit__5 ], [ %93, %exiting__6 ] + %84 = icmp sle i64 %83, %82 + br i1 %84, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %83) + %86 = bitcast i8* %85 to { %Array*, %Array* }** + %87 = load { %Array*, %Array* }*, { %Array*, %Array* }** %86, align 8 + %88 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %87, i32 0, i32 0 + %89 = load %Array*, %Array** %88, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %89, i32 1) + %90 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %87, i32 0, i32 1 + %91 = load %Array*, %Array** %90, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %91, i32 1) + %92 = bitcast { %Array*, %Array* }* %87 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %92, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %93 = add i64 %83, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %94 = sub i64 %18, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %95 = phi i64 [ 0, 
%exit__6 ], [ %105, %exiting__7 ] + %96 = icmp sle i64 %95, %94 + br i1 %96, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %95) + %98 = bitcast i8* %97 to { %Array*, %Array* }** + %99 = load { %Array*, %Array* }*, { %Array*, %Array* }** %98, align 8 + %100 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %99, i32 0, i32 0 + %101 = load %Array*, %Array** %100, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %101, i32 1) + %102 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %99, i32 0, i32 1 + %103 = load %Array*, %Array** %102, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %103, i32 1) + %104 = bitcast { %Array*, %Array* }* %99 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %104, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %105 = add i64 %95, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %106 = sub i64 %33, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %107 = phi i64 [ 0, %exit__7 ], [ %117, %exiting__8 ] + %108 = icmp sle i64 %107, %106 + br i1 %108, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %107) + %110 = bitcast i8* %109 to { %Array*, %Array* }** + %111 = load { %Array*, %Array* }*, { %Array*, %Array* }** %110, align 8 + %112 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %111, i32 0, i32 0 + %113 = load %Array*, %Array** %112, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %113, i32 1) + %114 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %111, i32 0, i32 1 + %115 = load %Array*, %Array** %114, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %115, i32 1) + %116 = bitcast { %Array*, %Array* }* %111 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %116, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %117 = add i64 %107, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %118 = sub i64 %48, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %119 = phi i64 [ 0, %exit__8 ], [ %129, %exiting__9 ] + %120 = icmp sle i64 %119, %118 + br i1 %120, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %119) + %122 = bitcast i8* %121 to { %Array*, %Array* }** + %123 = load { %Array*, %Array* }*, { %Array*, %Array* }** %122, align 8 + %124 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 0 + %125 = load %Array*, %Array** %124, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %125, i32 1) + %126 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 1 + %127 = load %Array*, %Array** %126, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %127, i32 1) + %128 = bitcast { %Array*, %Array* }* %123 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %128, i32 1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %129 = add i64 %119, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void 
@__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %130 = sub i64 %65, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %131 = phi i64 [ 0, %exit__9 ], [ %142, %exiting__10 ] + %132 = icmp sle i64 %131, %130 + br i1 %132, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %131) + %134 = bitcast i8* %133 to { { double, double }*, %Array* }** + %135 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %134, align 8 + %136 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %135, i32 0, i32 0 + %137 = load { double, double }*, { double, double }** %136, align 8 + %138 = bitcast { double, double }* %137 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %138, i32 1) + %139 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %135, i32 0, i32 1 + %140 = load %Array*, %Array** %139, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %140, i32 1) + %141 = bitcast { { double, double }*, %Array* }* %135 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %141, i32 1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %142 = add i64 %131, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + %143 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i32 0, i32 3 + %energyOffset = load double, double* %143, align 8 + %144 = call { i64, { double, %Callable* }* }* @Microsoft__Quantum__Chemistry__JordanWigner__TrotterStepOracle__body({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, double %trotterStepSize, i64 %trotterOrder) + %145 = getelementptr inbounds { i64, { double, %Callable* }* }, { i64, { double, %Callable* }* }* %144, i32 0, i32 0 + %nQubits = load i64, i64* %145, align 4 + %146 = getelementptr inbounds { i64, { double, %Callable* }* }, { i64, { double, %Callable* }* }* %144, i32 0, i32 1 + %147 = load { double, %Callable* }*, { double, %Callable* }** %146, align 8 + %148 = getelementptr inbounds { double, %Callable* }, { double, %Callable* }* %147, i32 0, i32 0 + %rescaleFactor = load double, double* %148, align 8 + %149 = getelementptr inbounds { double, %Callable* }, { double, %Callable* }* %147, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %149, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %150 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %151 = sub i64 %65, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %152 = phi i64 [ 0, %exit__10 ], [ %163, %exiting__11 ] + %153 = icmp sle i64 %152, %151 + br i1 %153, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %154 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %152) 
+ %155 = bitcast i8* %154 to { { double, double }*, %Array* }** + %156 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %155, align 8 + %157 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %156, i32 0, i32 0 + %158 = load { double, double }*, { double, double }** %157, align 8 + %159 = bitcast { double, double }* %158 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %159, i32 1) + %160 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %156, i32 0, i32 1 + %161 = load %Array*, %Array** %160, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %161, i32 1) + %162 = bitcast { { double, double }*, %Array* }* %156 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %162, i32 1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %163 = add i64 %152, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_reference_count(%Array* %64, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %79, i32 1) + %164 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { i64, %Array* }* }* getelementptr ({ %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* null, i32 1) to i64)) + %165 = bitcast %Tuple* %164 to { %Callable*, { i64, %Array* }* }* + %166 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %165, i32 0, i32 0 + %167 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %165, i32 0, i32 1 + store %Callable* %150, %Callable** %166, align 8 + store { i64, %Array* }* %statePrepData, { i64, %Array* }** %167, align 8 + %statePrep = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__3__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %164) + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePrep, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePrep, i32 1) + %168 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Characterization__RobustPhaseEstimation__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %169 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64 }* getelementptr ({ %Callable*, i64 }, { %Callable*, i64 }* null, i32 1) to i64)) + %170 = bitcast %Tuple* %169 to { %Callable*, i64 }* + %171 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %170, i32 0, i32 0 + %172 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %170, i32 0, i32 1 + store %Callable* %168, %Callable** %171, align 8 + store i64 %nBitsPrecision, i64* %172, align 4 + %phaseEstAlgorithm = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__4__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %169) + call void @__quantum__rt__capture_update_alias_count(%Callable* %phaseEstAlgorithm, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %phaseEstAlgorithm, i32 1) + %estPhase = call double @Microsoft__Quantum__Simulation__EstimateEnergy__body(i64 %nQubits, %Callable* %statePrep, %Callable* %oracle, %Callable* %phaseEstAlgorithm) + %173 = fmul double %estPhase, 
%rescaleFactor + %estEnergy = fadd double %173, %energyOffset + %174 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %175 = bitcast %Tuple* %174 to { double, double }* + %176 = getelementptr inbounds { double, double }, { double, double }* %175, i32 0, i32 0 + %177 = getelementptr inbounds { double, double }, { double, double }* %175, i32 0, i32 1 + store double %estPhase, double* %176, align 8 + store double %estEnergy, double* %177, align 8 + %178 = sub i64 %3, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %179 = phi i64 [ 0, %exit__11 ], [ %189, %exiting__12 ] + %180 = icmp sle i64 %179, %178 + br i1 %180, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %181 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %179) + %182 = bitcast i8* %181 to { %Array*, %Array* }** + %183 = load { %Array*, %Array* }*, { %Array*, %Array* }** %182, align 8 + %184 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %183, i32 0, i32 0 + %185 = load %Array*, %Array** %184, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %185, i32 -1) + %186 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %183, i32 0, i32 1 + %187 = load %Array*, %Array** %186, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %187, i32 -1) + %188 = bitcast { %Array*, %Array* }* %183 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %188, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %189 = add i64 %179, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %190 = sub i64 %18, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %191 = phi i64 [ 0, %exit__12 ], [ %201, %exiting__13 ] + %192 = icmp sle i64 %191, %190 + br i1 %192, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %193 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %191) + %194 = bitcast i8* %193 to { %Array*, %Array* }** + %195 = load { %Array*, %Array* }*, { %Array*, %Array* }** %194, align 8 + %196 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %195, i32 0, i32 0 + %197 = load %Array*, %Array** %196, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %197, i32 -1) + %198 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %195, i32 0, i32 1 + %199 = load %Array*, %Array** %198, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %199, i32 -1) + %200 = bitcast { %Array*, %Array* }* %195 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %200, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %201 = add i64 %191, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %202 = sub i64 %33, 1 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %203 = phi i64 [ 0, %exit__13 ], [ %213, %exiting__14 ] + %204 = icmp sle i64 %203, %202 + br i1 %204, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %205 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %203) + %206 = bitcast i8* %205 to { %Array*, %Array* }** + %207 = load { %Array*, %Array* }*, { %Array*, %Array* }** %206, align 8 + %208 = 
getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %207, i32 0, i32 0 + %209 = load %Array*, %Array** %208, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %209, i32 -1) + %210 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %207, i32 0, i32 1 + %211 = load %Array*, %Array** %210, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %211, i32 -1) + %212 = bitcast { %Array*, %Array* }* %207 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %212, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %213 = add i64 %203, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %214 = sub i64 %48, 1 + br label %header__15 + +header__15: ; preds = %exiting__15, %exit__14 + %215 = phi i64 [ 0, %exit__14 ], [ %225, %exiting__15 ] + %216 = icmp sle i64 %215, %214 + br i1 %216, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %217 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %215) + %218 = bitcast i8* %217 to { %Array*, %Array* }** + %219 = load { %Array*, %Array* }*, { %Array*, %Array* }** %218, align 8 + %220 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %219, i32 0, i32 0 + %221 = load %Array*, %Array** %220, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %221, i32 -1) + %222 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %219, i32 0, i32 1 + %223 = load %Array*, %Array** %222, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %223, i32 -1) + %224 = bitcast { %Array*, %Array* }* %219 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %224, i32 -1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %225 = add i64 %215, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %226 = sub i64 %65, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %227 = phi i64 [ 0, %exit__15 ], [ %238, %exiting__16 ] + %228 = icmp sle i64 %227, %226 + br i1 %228, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %229 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %227) + %230 = bitcast i8* %229 to { { double, double }*, %Array* }** + %231 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %230, align 8 + %232 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %231, i32 0, i32 0 + %233 = load { double, double }*, { double, double }** %232, align 8 + %234 = bitcast { double, double }* %233 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %234, i32 -1) + %235 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %231, i32 0, i32 1 + %236 = load %Array*, %Array** %235, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %236, i32 -1) + %237 = bitcast { { double, double }*, %Array* }* %231 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %237, i32 -1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %238 = add i64 %227, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 -1) + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 -1) + %239 = sub i64 %3, 1 + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %240 = phi i64 [ 0, %exit__16 ], [ %250, %exiting__17 ] + %241 = icmp sle i64 %240, %239 + br i1 %241, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %242 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %240) + %243 = bitcast i8* %242 to { %Array*, %Array* }** + %244 = load { %Array*, %Array* }*, { %Array*, %Array* }** %243, align 8 + %245 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %244, i32 0, i32 0 + %246 = load %Array*, %Array** %245, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %246, i32 -1) + %247 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %244, i32 0, i32 1 + %248 = load %Array*, %Array** %247, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %248, i32 -1) + %249 = bitcast { %Array*, %Array* }* %244 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %249, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %250 = add i64 %240, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %251 = sub i64 %18, 1 + br label %header__18 + +header__18: ; preds = %exiting__18, %exit__17 + %252 = phi i64 [ 0, %exit__17 ], [ %262, %exiting__18 ] + %253 = icmp sle i64 %252, %251 + br i1 %253, label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %254 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %252) + %255 = bitcast i8* %254 to { %Array*, %Array* }** + %256 = load { %Array*, %Array* }*, { %Array*, %Array* }** %255, align 8 + %257 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %256, i32 0, i32 0 + %258 = load %Array*, %Array** %257, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %258, i32 -1) + %259 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %256, i32 0, i32 1 + %260 = load %Array*, %Array** %259, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %260, i32 -1) + %261 = bitcast { %Array*, %Array* }* %256 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %261, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = %body__18 + %262 = add i64 %252, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %263 = sub i64 %33, 1 + br label %header__19 + +header__19: ; preds = %exiting__19, %exit__18 + %264 = phi i64 [ 0, %exit__18 ], [ %274, %exiting__19 ] + %265 = icmp sle i64 %264, %263 + br i1 %265, label %body__19, label %exit__19 + +body__19: ; preds = %header__19 + %266 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %264) + %267 = bitcast i8* %266 to { %Array*, %Array* }** + %268 = load { %Array*, %Array* }*, { %Array*, %Array* }** %267, align 8 + %269 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %268, i32 0, i32 0 + %270 = load %Array*, %Array** %269, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %270, i32 -1) + %271 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %268, i32 0, i32 1 + %272 = load %Array*, %Array** %271, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %272, i32 -1) 
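+ ; after releasing the alias counts on both element arrays of this term tuple, drop the alias count on the containing tuple itself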
+ %273 = bitcast { %Array*, %Array* }* %268 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %273, i32 -1) + br label %exiting__19 + +exiting__19: ; preds = %body__19 + %274 = add i64 %264, 1 + br label %header__19 + +exit__19: ; preds = %header__19 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %275 = sub i64 %48, 1 + br label %header__20 + +header__20: ; preds = %exiting__20, %exit__19 + %276 = phi i64 [ 0, %exit__19 ], [ %286, %exiting__20 ] + %277 = icmp sle i64 %276, %275 + br i1 %277, label %body__20, label %exit__20 + +body__20: ; preds = %header__20 + %278 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %276) + %279 = bitcast i8* %278 to { %Array*, %Array* }** + %280 = load { %Array*, %Array* }*, { %Array*, %Array* }** %279, align 8 + %281 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %280, i32 0, i32 0 + %282 = load %Array*, %Array** %281, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %282, i32 -1) + %283 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %280, i32 0, i32 1 + %284 = load %Array*, %Array** %283, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %284, i32 -1) + %285 = bitcast { %Array*, %Array* }* %280 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %285, i32 -1) + br label %exiting__20 + +exiting__20: ; preds = %body__20 + %286 = add i64 %276, 1 + br label %header__20 + +exit__20: ; preds = %header__20 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %287 = sub i64 %65, 1 + br label %header__21 + +header__21: ; preds = %exiting__21, %exit__20 + %288 = phi i64 [ 0, %exit__20 ], [ %299, %exiting__21 ] + %289 = icmp sle i64 %288, %287 + br i1 %289, label %body__21, label %exit__21 + +body__21: ; preds = %header__21 + %290 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %288) + %291 = bitcast i8* %290 to { { double, double }*, %Array* }** + %292 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %291, align 8 + %293 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %292, i32 0, i32 0 + %294 = load { double, double }*, { double, double }** %293, align 8 + %295 = bitcast { double, double }* %294 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %295, i32 -1) + %296 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %292, i32 0, i32 1 + %297 = load %Array*, %Array** %296, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %297, i32 -1) + %298 = bitcast { { double, double }*, %Array* }* %292 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %298, i32 -1) + br label %exiting__21 + +exiting__21: ; preds = %body__21 + %299 = add i64 %288, 1 + br label %header__21 + +exit__21: ; preds = %header__21 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePrep, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePrep, i32 -1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %phaseEstAlgorithm, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %phaseEstAlgorithm, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 -1) + %300 = bitcast { double, %Callable* }* %147 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %300, i32 -1) + %301 = bitcast { i64, { double, %Callable* }* }* %144 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %301, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %statePrep, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %statePrep, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %phaseEstAlgorithm, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %phaseEstAlgorithm, i32 -1) + ret { double, double }* %175 +} + +define internal { i64, { double, %Callable* }* }* @Microsoft__Quantum__Chemistry__JordanWigner__TrotterStepOracle__body({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, double %trotterStepSize, i64 %trotterOrder) { +entry: + %0 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i32 0, i32 1 + %data = load { %Array*, %Array*, %Array*, %Array* }*, { %Array*, %Array*, %Array*, %Array* }** %0, align 8 + %1 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %2) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %5) + %8 = bitcast i8* %7 to { %Array*, %Array* }** + %9 = load { %Array*, %Array* }*, { %Array*, %Array* }** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array*, %Array* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call i64 @__quantum__rt__array_get_size_1d(%Array* %17) + %19 = sub i64 %18, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call 
i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %20) + %23 = bitcast i8* %22 to { %Array*, %Array* }** + %24 = load { %Array*, %Array* }*, { %Array*, %Array* }** %23, align 8 + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { %Array*, %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %31 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 2 + %32 = load %Array*, %Array** %31, align 8 + %33 = call i64 @__quantum__rt__array_get_size_1d(%Array* %32) + %34 = sub i64 %33, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %45, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %35) + %38 = bitcast i8* %37 to { %Array*, %Array* }** + %39 = load { %Array*, %Array* }*, { %Array*, %Array* }** %38, align 8 + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 0 + %41 = load %Array*, %Array** %40, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1) + %42 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 1 + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + %44 = bitcast { %Array*, %Array* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %44, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %45 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %46 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 3 + %47 = load %Array*, %Array** %46, align 8 + %48 = call i64 @__quantum__rt__array_get_size_1d(%Array* %47) + %49 = sub i64 %48, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %50 = phi i64 [ 0, %exit__3 ], [ %60, %exiting__4 ] + %51 = icmp sle i64 %50, %49 + br i1 %51, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %50) + %53 = bitcast i8* %52 to { %Array*, %Array* }** + %54 = load { %Array*, %Array* }*, { %Array*, %Array* }** %53, align 8 + %55 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 0 + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + %57 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 1 + %58 = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 1) + %59 = bitcast { %Array*, %Array* }* %54 to 
%Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %59, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %60 = add i64 %50, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + %61 = bitcast { %Array*, %Array*, %Array*, %Array* }* %data to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %62 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i32 0, i32 2 + %statePrepData = load { i64, %Array* }*, { i64, %Array* }** %62, align 8 + %63 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %statePrepData, i32 0, i32 1 + %64 = load %Array*, %Array** %63, align 8 + %65 = call i64 @__quantum__rt__array_get_size_1d(%Array* %64) + %66 = sub i64 %65, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %67 = phi i64 [ 0, %exit__4 ], [ %78, %exiting__5 ] + %68 = icmp sle i64 %67, %66 + br i1 %68, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %67) + %70 = bitcast i8* %69 to { { double, double }*, %Array* }** + %71 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %70, align 8 + %72 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %71, i32 0, i32 0 + %73 = load { double, double }*, { double, double }** %72, align 8 + %74 = bitcast { double, double }* %73 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %74, i32 1) + %75 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %71, i32 0, i32 1 + %76 = load %Array*, %Array** %75, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 1) + %77 = bitcast { { double, double }*, %Array* }* %71 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %77, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %78 = add i64 %67, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + %79 = bitcast { i64, %Array* }* %statePrepData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + %80 = bitcast { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 1) + %81 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i32 0, i32 0 + %nSpinOrbitals = load i64, i64* %81, align 4 + %82 = sub i64 %3, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %83 = phi i64 [ 0, %exit__5 ], [ %93, %exiting__6 ] + %84 = icmp sle i64 %83, %82 + br i1 %84, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %83) + %86 = bitcast i8* %85 to { %Array*, %Array* }** + %87 = load { %Array*, %Array* }*, { %Array*, %Array* }** %86, align 8 + %88 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %87, i32 0, i32 0 + %89 = load %Array*, %Array** %88, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %89, i32 1) + %90 = 
getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %87, i32 0, i32 1 + %91 = load %Array*, %Array** %90, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %91, i32 1) + %92 = bitcast { %Array*, %Array* }* %87 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %92, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %93 = add i64 %83, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %94 = sub i64 %18, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %95 = phi i64 [ 0, %exit__6 ], [ %105, %exiting__7 ] + %96 = icmp sle i64 %95, %94 + br i1 %96, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %95) + %98 = bitcast i8* %97 to { %Array*, %Array* }** + %99 = load { %Array*, %Array* }*, { %Array*, %Array* }** %98, align 8 + %100 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %99, i32 0, i32 0 + %101 = load %Array*, %Array** %100, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %101, i32 1) + %102 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %99, i32 0, i32 1 + %103 = load %Array*, %Array** %102, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %103, i32 1) + %104 = bitcast { %Array*, %Array* }* %99 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %104, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %105 = add i64 %95, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %106 = sub i64 %33, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %107 = phi i64 [ 0, %exit__7 ], [ %117, %exiting__8 ] + %108 = icmp sle i64 %107, %106 + br i1 %108, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %107) + %110 = bitcast i8* %109 to { %Array*, %Array* }** + %111 = load { %Array*, %Array* }*, { %Array*, %Array* }** %110, align 8 + %112 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %111, i32 0, i32 0 + %113 = load %Array*, %Array** %112, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %113, i32 1) + %114 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %111, i32 0, i32 1 + %115 = load %Array*, %Array** %114, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %115, i32 1) + %116 = bitcast { %Array*, %Array* }* %111 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %116, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %117 = add i64 %107, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %118 = sub i64 %48, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %119 = phi i64 [ 0, %exit__8 ], [ %129, %exiting__9 ] + %120 = icmp sle i64 %119, %118 + br i1 %120, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %119) + %122 = bitcast i8* %121 to { %Array*, %Array* }** + %123 = load { %Array*, %Array* }*, { %Array*, %Array* }** %122, align 8 + %124 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 0 + 
%125 = load %Array*, %Array** %124, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %125, i32 1) + %126 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 1 + %127 = load %Array*, %Array** %126, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %127, i32 1) + %128 = bitcast { %Array*, %Array* }* %123 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %128, i32 1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %129 = add i64 %119, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %130 = sub i64 %65, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %131 = phi i64 [ 0, %exit__9 ], [ %142, %exiting__10 ] + %132 = icmp sle i64 %131, %130 + br i1 %132, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %131) + %134 = bitcast i8* %133 to { { double, double }*, %Array* }** + %135 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %134, align 8 + %136 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %135, i32 0, i32 0 + %137 = load { double, double }*, { double, double }** %136, align 8 + %138 = bitcast { double, double }* %137 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %138, i32 1) + %139 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %135, i32 0, i32 1 + %140 = load %Array*, %Array** %139, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %140, i32 1) + %141 = bitcast { { double, double }*, %Array* }* %135 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %141, i32 1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %142 = add i64 %131, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + %143 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i32 0, i32 3 + %energyShift = load double, double* %143, align 8 + %generatorSystem = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerGeneratorSystem__body({ %Array*, %Array*, %Array*, %Array* }* %data) + %144 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %145 = load %Callable*, %Callable** %144, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %145, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %145, i32 1) + %146 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %146, i32 1) + %147 = call { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionEvolutionSet__body() + %evolutionGenerator = call { { %Callable* }*, { i64, %Callable* }* }* @Microsoft__Quantum__Simulation__EvolutionGenerator__body({ %Callable* }* %147, { i64, %Callable* }* %generatorSystem) + %148 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* 
}* }* %evolutionGenerator, i32 0, i32 0 + %149 = load { %Callable* }*, { %Callable* }** %148, align 8 + %150 = getelementptr inbounds { %Callable* }, { %Callable* }* %149, i32 0, i32 0 + %151 = load %Callable*, %Callable** %150, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %151, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %151, i32 1) + %152 = bitcast { %Callable* }* %149 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %152, i32 1) + %153 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %154 = load { i64, %Callable* }*, { i64, %Callable* }** %153, align 8 + %155 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %154, i32 0, i32 1 + %156 = load %Callable*, %Callable** %155, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %156, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %156, i32 1) + %157 = bitcast { i64, %Callable* }* %154 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %157, i32 1) + %158 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %158, i32 1) + %159 = call { %Callable* }* @Microsoft__Quantum__Simulation__TrotterSimulationAlgorithm__body(double %trotterStepSize, i64 %trotterOrder) + %160 = getelementptr inbounds { %Callable* }, { %Callable* }* %159, i32 0, i32 0 + %simulationAlgorithm = load %Callable*, %Callable** %160, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %simulationAlgorithm, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %simulationAlgorithm, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %simulationAlgorithm, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %simulationAlgorithm, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %151, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %151, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %152, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %156, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %156, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %157, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %158, i32 1) + %161 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* getelementptr ({ %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* null, i32 1) to i64)) + %162 = bitcast %Tuple* %161 to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %163 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %162, i32 0, i32 0 + %164 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %162, i32 0, i32 1 + %165 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %162, i32 0, i32 
2 + store %Callable* %simulationAlgorithm, %Callable** %163, align 8 + store double %trotterStepSize, double* %164, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, { { %Callable* }*, { i64, %Callable* }* }** %165, align 8 + %oracle = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__12__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__7__FunctionTable, %Tuple* %161) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %rescaleFactor = fdiv double 1.000000e+00, %trotterStepSize + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 1) + %166 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Callable* }* getelementptr ({ double, %Callable* }, { double, %Callable* }* null, i32 1) to i64)) + %167 = bitcast %Tuple* %166 to { double, %Callable* }* + %168 = getelementptr inbounds { double, %Callable* }, { double, %Callable* }* %167, i32 0, i32 0 + %169 = getelementptr inbounds { double, %Callable* }, { double, %Callable* }* %167, i32 0, i32 1 + store double %rescaleFactor, double* %168, align 8 + store %Callable* %oracle, %Callable** %169, align 8 + %170 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { double, %Callable* }* }* getelementptr ({ i64, { double, %Callable* }* }, { i64, { double, %Callable* }* }* null, i32 1) to i64)) + %171 = bitcast %Tuple* %170 to { i64, { double, %Callable* }* }* + %172 = getelementptr inbounds { i64, { double, %Callable* }* }, { i64, { double, %Callable* }* }* %171, i32 0, i32 0 + %173 = getelementptr inbounds { i64, { double, %Callable* }* }, { i64, { double, %Callable* }* }* %171, i32 0, i32 1 + store i64 %nSpinOrbitals, i64* %172, align 4 + store { double, %Callable* }* %167, { double, %Callable* }** %173, align 8 + %174 = getelementptr inbounds { %Callable* }, { %Callable* }* %147, i32 0, i32 0 + %175 = load %Callable*, %Callable** %174, align 8 + %176 = sub i64 %3, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %177 = phi i64 [ 0, %exit__10 ], [ %187, %exiting__11 ] + %178 = icmp sle i64 %177, %176 + br i1 %178, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %179 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %177) + %180 = bitcast i8* %179 to { %Array*, %Array* }** + %181 = load { %Array*, %Array* }*, { %Array*, %Array* }** %180, align 8 + %182 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %181, i32 0, i32 0 + %183 = load %Array*, %Array** %182, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %183, i32 -1) + %184 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %181, i32 0, i32 1 + %185 = load %Array*, %Array** %184, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %185, i32 -1) + %186 = bitcast { %Array*, %Array* }* %181 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %186, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %187 = add i64 %177, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %188 = sub i64 %18, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %189 = phi i64 [ 0, 
%exit__11 ], [ %199, %exiting__12 ] + %190 = icmp sle i64 %189, %188 + br i1 %190, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %191 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %189) + %192 = bitcast i8* %191 to { %Array*, %Array* }** + %193 = load { %Array*, %Array* }*, { %Array*, %Array* }** %192, align 8 + %194 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %193, i32 0, i32 0 + %195 = load %Array*, %Array** %194, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %195, i32 -1) + %196 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %193, i32 0, i32 1 + %197 = load %Array*, %Array** %196, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %197, i32 -1) + %198 = bitcast { %Array*, %Array* }* %193 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %198, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %199 = add i64 %189, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %200 = sub i64 %33, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %201 = phi i64 [ 0, %exit__12 ], [ %211, %exiting__13 ] + %202 = icmp sle i64 %201, %200 + br i1 %202, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %203 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %201) + %204 = bitcast i8* %203 to { %Array*, %Array* }** + %205 = load { %Array*, %Array* }*, { %Array*, %Array* }** %204, align 8 + %206 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %205, i32 0, i32 0 + %207 = load %Array*, %Array** %206, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %207, i32 -1) + %208 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %205, i32 0, i32 1 + %209 = load %Array*, %Array** %208, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %209, i32 -1) + %210 = bitcast { %Array*, %Array* }* %205 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %210, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %211 = add i64 %201, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %212 = sub i64 %48, 1 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %213 = phi i64 [ 0, %exit__13 ], [ %223, %exiting__14 ] + %214 = icmp sle i64 %213, %212 + br i1 %214, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %215 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %213) + %216 = bitcast i8* %215 to { %Array*, %Array* }** + %217 = load { %Array*, %Array* }*, { %Array*, %Array* }** %216, align 8 + %218 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %217, i32 0, i32 0 + %219 = load %Array*, %Array** %218, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %219, i32 -1) + %220 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %217, i32 0, i32 1 + %221 = load %Array*, %Array** %220, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %221, i32 -1) + %222 = bitcast { %Array*, %Array* }* %217 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %222, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %223 = add i64 %213, 1 + br label %header__14 + +exit__14: 
; preds = %header__14 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %224 = sub i64 %65, 1 + br label %header__15 + +header__15: ; preds = %exiting__15, %exit__14 + %225 = phi i64 [ 0, %exit__14 ], [ %236, %exiting__15 ] + %226 = icmp sle i64 %225, %224 + br i1 %226, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %227 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %225) + %228 = bitcast i8* %227 to { { double, double }*, %Array* }** + %229 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %228, align 8 + %230 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %229, i32 0, i32 0 + %231 = load { double, double }*, { double, double }** %230, align 8 + %232 = bitcast { double, double }* %231 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %232, i32 -1) + %233 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %229, i32 0, i32 1 + %234 = load %Array*, %Array** %233, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %234, i32 -1) + %235 = bitcast { { double, double }*, %Array* }* %229 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %235, i32 -1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %236 = add i64 %225, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 -1) + %237 = sub i64 %3, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %238 = phi i64 [ 0, %exit__15 ], [ %248, %exiting__16 ] + %239 = icmp sle i64 %238, %237 + br i1 %239, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %240 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %238) + %241 = bitcast i8* %240 to { %Array*, %Array* }** + %242 = load { %Array*, %Array* }*, { %Array*, %Array* }** %241, align 8 + %243 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %242, i32 0, i32 0 + %244 = load %Array*, %Array** %243, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %244, i32 -1) + %245 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %242, i32 0, i32 1 + %246 = load %Array*, %Array** %245, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %246, i32 -1) + %247 = bitcast { %Array*, %Array* }* %242 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %247, i32 -1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %248 = add i64 %238, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %249 = sub i64 %18, 1 + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %250 = phi i64 [ 0, %exit__16 ], [ %260, %exiting__17 ] + %251 = icmp sle i64 %250, %249 + br i1 %251, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %252 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %250) + %253 = bitcast i8* %252 to { %Array*, %Array* }** + %254 = load { %Array*, %Array* }*, { %Array*, %Array* }** %253, align 8 + %255 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* 
%254, i32 0, i32 0 + %256 = load %Array*, %Array** %255, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %256, i32 -1) + %257 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %254, i32 0, i32 1 + %258 = load %Array*, %Array** %257, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %258, i32 -1) + %259 = bitcast { %Array*, %Array* }* %254 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %259, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %260 = add i64 %250, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %261 = sub i64 %33, 1 + br label %header__18 + +header__18: ; preds = %exiting__18, %exit__17 + %262 = phi i64 [ 0, %exit__17 ], [ %272, %exiting__18 ] + %263 = icmp sle i64 %262, %261 + br i1 %263, label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %264 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %262) + %265 = bitcast i8* %264 to { %Array*, %Array* }** + %266 = load { %Array*, %Array* }*, { %Array*, %Array* }** %265, align 8 + %267 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %266, i32 0, i32 0 + %268 = load %Array*, %Array** %267, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %268, i32 -1) + %269 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %266, i32 0, i32 1 + %270 = load %Array*, %Array** %269, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %270, i32 -1) + %271 = bitcast { %Array*, %Array* }* %266 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %271, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = %body__18 + %272 = add i64 %262, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %273 = sub i64 %48, 1 + br label %header__19 + +header__19: ; preds = %exiting__19, %exit__18 + %274 = phi i64 [ 0, %exit__18 ], [ %284, %exiting__19 ] + %275 = icmp sle i64 %274, %273 + br i1 %275, label %body__19, label %exit__19 + +body__19: ; preds = %header__19 + %276 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %274) + %277 = bitcast i8* %276 to { %Array*, %Array* }** + %278 = load { %Array*, %Array* }*, { %Array*, %Array* }** %277, align 8 + %279 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %278, i32 0, i32 0 + %280 = load %Array*, %Array** %279, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %280, i32 -1) + %281 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %278, i32 0, i32 1 + %282 = load %Array*, %Array** %281, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %282, i32 -1) + %283 = bitcast { %Array*, %Array* }* %278 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %283, i32 -1) + br label %exiting__19 + +exiting__19: ; preds = %body__19 + %284 = add i64 %274, 1 + br label %header__19 + +exit__19: ; preds = %header__19 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %285 = sub i64 %65, 1 + br label %header__20 + +header__20: ; preds = %exiting__20, %exit__19 + %286 = phi i64 [ 0, %exit__19 ], [ %297, %exiting__20 ] + %287 = icmp sle i64 %286, %285 + br i1 %287, label %body__20, label %exit__20 + +body__20: ; preds = 
%header__20 + %288 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %286) + %289 = bitcast i8* %288 to { { double, double }*, %Array* }** + %290 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %289, align 8 + %291 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %290, i32 0, i32 0 + %292 = load { double, double }*, { double, double }** %291, align 8 + %293 = bitcast { double, double }* %292 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %293, i32 -1) + %294 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %290, i32 0, i32 1 + %295 = load %Array*, %Array** %294, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %295, i32 -1) + %296 = bitcast { { double, double }*, %Array* }* %290 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %296, i32 -1) + br label %exiting__20 + +exiting__20: ; preds = %body__20 + %297 = add i64 %286, 1 + br label %header__20 + +exit__20: ; preds = %header__20 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %145, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %145, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %146, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %151, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %151, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %152, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %156, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %156, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %157, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %158, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %simulationAlgorithm, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %simulationAlgorithm, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %145, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %145, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %146, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %175, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %175, i32 -1) + %298 = bitcast { %Callable* }* %147 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %298, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %151, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %151, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %152, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %156, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %156, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %157, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %158, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %simulationAlgorithm, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %simulationAlgorithm, i32 -1) + %299 = bitcast { %Callable* }* %159 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %299, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 -1) + ret { i64, { double, %Callable* }* }* %171 +} + +define internal void @Lifted__PartialApplication__3__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Array* }*, { i64, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Array* }*, %Array* }* getelementptr ({ { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { i64, %Array* }*, %Array* }* + %8 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %7, i32 0, i32 1 + store { i64, %Array* }* %2, { i64, %Array* }** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64 }* + %1 = getelementptr inbounds { %Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = bitcast %Tuple* %arg-tuple to { { %Callable* }*, %Array* }* + %4 = getelementptr inbounds { { %Callable* }*, %Array* }, { { %Callable* }*, %Array* }* %3, i32 0, i32 0 + %5 = load { %Callable* }*, { %Callable* }** %4, align 8 + %6 = getelementptr inbounds { { %Callable* }*, %Array* }, { { %Callable* }*, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { %Callable* }*, %Array* }* getelementptr ({ i64, { %Callable* }*, %Array* }, { i64, { %Callable* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { i64, { %Callable* }*, %Array* }* + %10 = getelementptr inbounds { i64, { %Callable* }*, %Array* }, { i64, { %Callable* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { i64, { %Callable* }*, %Array* }, { i64, { %Callable* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { i64, { %Callable* }*, %Array* }, { i64, { %Callable* }*, %Array* }* %9, i32 0, i32 2 + store i64 %2, i64* %10, align 4 + store { %Callable* }* %5, { %Callable* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { 
%Callable*, i64 }, { %Callable*, i64 }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWigner0123Term_____body({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %v0123 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %v0123, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %v0123, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %8 = bitcast i8* %7 to i64* + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %10 = bitcast i8* %9 to i64* + %11 = load i64, i64* %8, align 4 + %12 = load i64, i64* %10, align 4 + %13 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 0) + %15 = bitcast i8* %14 to i64* + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 1) + %17 = bitcast i8* %16 to i64* + store i64 %11, i64* %15, align 4 + store i64 %12, i64* %17, align 4 + %qubitsPQ = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %13, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsPQ, i32 1) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 2) + %19 = bitcast i8* %18 to i64* + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %21 = bitcast i8* %20 to i64* + %22 = load i64, i64* %19, align 4 + %23 = load i64, i64* %21, align 4 + %24 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %24, i64 0) + %26 = bitcast i8* %25 to i64* + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %24, i64 1) + %28 = bitcast i8* %27 to i64* + store i64 %22, i64* %26, align 4 + store i64 %23, i64* %28, align 4 + %qubitsRS = call %Array* 
@Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %24, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsRS, i32 1) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %30 = bitcast i8* %29 to i64* + %31 = load i64, i64* %30, align 4 + %32 = add i64 %31, 1 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %34 = bitcast i8* %33 to i64* + %35 = load i64, i64* %34, align 4 + %36 = sub i64 %35, 1 + %37 = insertvalue %Range zeroinitializer, i64 %32, 0 + %38 = insertvalue %Range %37, i64 1, 1 + %39 = insertvalue %Range %38, i64 %36, 2 + %qubitsPQJW = call %Array* @__quantum__rt__array_slice_1d(%Array* %qubits, %Range %39, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsPQJW, i32 1) + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 2) + %41 = bitcast i8* %40 to i64* + %42 = load i64, i64* %41, align 4 + %43 = add i64 %42, 1 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %45 = bitcast i8* %44 to i64* + %46 = load i64, i64* %45, align 4 + %47 = sub i64 %46, 1 + %48 = insertvalue %Range zeroinitializer, i64 %43, 0 + %49 = insertvalue %Range %48, i64 1, 1 + %50 = insertvalue %Range %49, i64 %47, 2 + %qubitsRSJW = call %Array* @__quantum__rt__array_slice_1d(%Array* %qubits, %Range %50, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsRSJW, i32 1) + %51 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 0) + %53 = bitcast i8* %52 to i2* + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 1) + %55 = bitcast i8* %54 to i2* + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 2) + %57 = bitcast i8* %56 to i2* + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 3) + %59 = bitcast i8* %58 to i2* + store i2 1, i2* %53, align 1 + store i2 1, i2* %55, align 1 + store i2 1, i2* %57, align 1 + store i2 1, i2* %59, align 1 + %60 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 0) + %62 = bitcast i8* %61 to i2* + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 1) + %64 = bitcast i8* %63 to i2* + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 2) + %66 = bitcast i8* %65 to i2* + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 3) + %68 = bitcast i8* %67 to i2* + store i2 1, i2* %62, align 1 + store i2 1, i2* %64, align 1 + store i2 -1, i2* %66, align 1 + store i2 -1, i2* %68, align 1 + %69 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 0) + %71 = bitcast i8* %70 to i2* + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 1) + %73 = bitcast i8* %72 to i2* + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 2) + %75 = bitcast i8* %74 to i2* + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 3) + %77 = bitcast i8* %76 to i2* + store i2 1, i2* %71, align 1 + store i2 -1, i2* %73, align 1 + store i2 1, i2* %75, align 1 + store i2 -1, i2* %77, align 1 + %78 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%78, i64 0) + %80 = bitcast i8* %79 to i2* + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 1) + %82 = bitcast i8* %81 to i2* + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 2) + %84 = bitcast i8* %83 to i2* + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 3) + %86 = bitcast i8* %85 to i2* + store i2 -1, i2* %80, align 1 + store i2 1, i2* %82, align 1 + store i2 1, i2* %84, align 1 + store i2 -1, i2* %86, align 1 + %87 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 0) + %89 = bitcast i8* %88 to i2* + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 1) + %91 = bitcast i8* %90 to i2* + %92 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 2) + %93 = bitcast i8* %92 to i2* + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 3) + %95 = bitcast i8* %94 to i2* + store i2 -1, i2* %89, align 1 + store i2 -1, i2* %91, align 1 + store i2 -1, i2* %93, align 1 + store i2 -1, i2* %95, align 1 + %96 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 0) + %98 = bitcast i8* %97 to i2* + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 1) + %100 = bitcast i8* %99 to i2* + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 2) + %102 = bitcast i8* %101 to i2* + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 3) + %104 = bitcast i8* %103 to i2* + store i2 -1, i2* %98, align 1 + store i2 -1, i2* %100, align 1 + store i2 1, i2* %102, align 1 + store i2 1, i2* %104, align 1 + %105 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 0) + %107 = bitcast i8* %106 to i2* + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 1) + %109 = bitcast i8* %108 to i2* + %110 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 2) + %111 = bitcast i8* %110 to i2* + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 3) + %113 = bitcast i8* %112 to i2* + store i2 -1, i2* %107, align 1 + store i2 1, i2* %109, align 1 + store i2 -1, i2* %111, align 1 + store i2 1, i2* %113, align 1 + %114 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 0) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 1) + %118 = bitcast i8* %117 to i2* + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 2) + %120 = bitcast i8* %119 to i2* + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 3) + %122 = bitcast i8* %121 to i2* + store i2 1, i2* %116, align 1 + store i2 -1, i2* %118, align 1 + store i2 -1, i2* %120, align 1 + store i2 1, i2* %122, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %124 = bitcast i8* %123 to %Array** + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %126 = bitcast i8* %125 to %Array** + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 2) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%ops, i64 3) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 4) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 5) + %134 = bitcast i8* %133 to %Array** + %135 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 6) + %136 = bitcast i8* %135 to %Array** + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 7) + %138 = bitcast i8* %137 to %Array** + store %Array* %51, %Array** %124, align 8 + store %Array* %60, %Array** %126, align 8 + store %Array* %69, %Array** %128, align 8 + store %Array* %78, %Array** %130, align 8 + store %Array* %87, %Array** %132, align 8 + store %Array* %96, %Array** %134, align 8 + store %Array* %105, %Array** %136, align 8 + store %Array* %114, %Array** %138, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %139 = phi i64 [ 0, %entry ], [ %144, %exiting__1 ] + %140 = icmp sle i64 %139, 7 + br i1 %140, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %141 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %139) + %142 = bitcast i8* %141 to %Array** + %143 = load %Array*, %Array** %142, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %143, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %144 = add i64 %139, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %145 = call %Range @Microsoft__Quantum__Arrays___4719fa27e9f8473381154ca8c04d2b3a_IndexRange__body(%Array* %ops) + %146 = extractvalue %Range %145, 0 + %147 = extractvalue %Range %145, 1 + %148 = extractvalue %Range %145, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %149 = icmp sgt i64 %147, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idxOp = phi i64 [ %146, %preheader__1 ], [ %171, %exiting__2 ] + %150 = icmp sle i64 %idxOp, %148 + %151 = icmp sge i64 %idxOp, %148 + %152 = select i1 %149, i1 %150, i1 %151 + br i1 %152, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %153 = srem i64 %idxOp, 4 + %154 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %v0123, i64 %153) + %155 = bitcast i8* %154 to double* + %156 = load double, double* %155, align 8 + %157 = call i1 @Microsoft__Quantum__Chemistry__IsNotZero__body(double %156) + br i1 %157, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__2 + %158 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %idxOp) + %159 = bitcast i8* %158 to %Array** + %160 = load %Array*, %Array** %159, align 8 + %161 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubitsPQJW) + %162 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubitsRSJW) + %163 = add i64 %161, %162 + %164 = call %Array* @Microsoft__Quantum__Arrays___041f4b97ff464238a7f7da162bd94e8e_ConstantArray__body(i64 %163, i2 -2) + %paulis = call %Array* @__quantum__rt__array_concatenate(%Array* %160, %Array* %164) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %165 = srem i64 %idxOp, 4 + %166 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %v0123, i64 %165) + %167 = bitcast i8* %166 to double* + %168 = load double, double* %167, align 8 + %theta = fmul double %stepSize, %168 + %169 = call %Array* 
@__quantum__rt__array_concatenate(%Array* %qubitsPQ, %Array* %qubitsRS) + call void @__quantum__rt__array_update_reference_count(%Array* %169, i32 1) + %170 = call %Array* @__quantum__rt__array_concatenate(%Array* %169, %Array* %qubitsPQJW) + call void @__quantum__rt__array_update_reference_count(%Array* %170, i32 1) + %qubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %170, %Array* %qubitsRSJW) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %theta, %Array* %qubits__1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %164, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %169, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %169, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %170, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %170, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__2 + br label %exiting__2 + +exiting__2: ; preds = %continue__1 + %171 = add i64 %idxOp, %147 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %v0123, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %v0123, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsPQ, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsRS, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsPQJW, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsRSJW, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %172 = phi i64 [ 0, %exit__2 ], [ %177, %exiting__3 ] + %173 = icmp sle i64 %172, 7 + br i1 %173, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %174 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %172) + %175 = bitcast i8* %174 to %Array** + %176 = load %Array*, %Array** %175, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %176, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %177 = add i64 %172, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsPQ, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsRS, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsPQJW, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsRSJW, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %178 = phi i64 [ 0, %exit__3 ], [ %183, %exiting__4 ] + %179 = icmp sle i64 %178, 7 + br i1 %179, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %180 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %178) + %181 = bitcast i8* %180 to %Array** + %182 = load %Array*, %Array** %181, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %182, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %183 = add i64 %178, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %indices, %Array* %array) { +entry: + %sliced = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %nSliced = call i64 @__quantum__rt__array_get_size_1d(%Array* %indices) + %0 = icmp eq i64 %nSliced, 0 + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %1 + +continue__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %3 = bitcast i8* %2 to i64* + %4 = load i64, i64* %3, align 4 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %4) + %6 = bitcast i8* %5 to %Qubit** + %7 = load %Qubit*, %Qubit** %6, align 8 + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nSliced) + %9 = sub i64 %nSliced, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %10 = phi i64 [ 0, %continue__1 ], [ %14, %exiting__1 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 %10) + %13 = bitcast i8* %12 to %Qubit** + store %Qubit* %7, %Qubit** %13, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %14 = add i64 %10, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %8, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %15 = sub i64 %nSliced, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idx = phi i64 [ 1, %exit__1 ], [ %27, %exiting__2 ] + %16 = icmp sle i64 %idx, %15 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = load %Array*, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %18 = call %Array* @__quantum__rt__array_copy(%Array* %17, i1 false) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%indices, i64 %idx) + %20 = bitcast i8* %19 to i64* + %21 = load i64, i64* %20, align 4 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %21) + %23 = bitcast i8* %22 to %Qubit** + %24 = load %Qubit*, %Qubit** %23, align 8 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %idx) + %26 = bitcast i8* %25 to %Qubit** + store %Qubit* %24, %Qubit** %26, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + store %Array* %18, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %idx, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %28 = load %Array*, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 -1) + ret %Array* %28 +} + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare %Array* @__quantum__rt__array_slice_1d(%Array*, %Range, i1) + +define internal %Range @Microsoft__Quantum__Arrays___4719fa27e9f8473381154ca8c04d2b3a_IndexRange__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %8 = sub i64 %0, 1 + %9 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %8, 2 + %10 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %11 = phi i64 [ 0, %exit__1 ], [ %16, %exiting__2 ] + %12 = icmp sle i64 %11, %10 + br i1 %12, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %11) + %14 = bitcast i8* %13 to %Array** + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %16 = add i64 %11, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %9 +} + +define internal i1 @Microsoft__Quantum__Chemistry__IsNotZero__body(double %number) { +entry: + %0 = call double @Microsoft__Quantum__Math__AbsD__body(double %number) + %1 = call double @Microsoft__Quantum__Math__PowD__body(double 1.000000e+01, double -1.500000e+01) + %2 = fcmp ogt double %0, %1 + ret i1 %2 +} + +define internal %Array* @Microsoft__Quantum__Arrays___041f4b97ff464238a7f7da162bd94e8e_ConstantArray__body(i64 %length, i2 %value) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %length) + %1 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + 
br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2) + %5 = bitcast i8* %4 to i2* + store i2 %value, i2* %5, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + ret %Array* %0 +} + +declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) + +declare void @__quantum__qis__exp__body(%Array*, double, %Array*) + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWigner0123Term_____adj({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %__qsVar1__v0123__ = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__v0123__, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__v0123__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %8 = bitcast i8* %7 to i64* + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %10 = bitcast i8* %9 to i64* + %11 = load i64, i64* %8, align 4 + %12 = load i64, i64* %10, align 4 + %13 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 0) + %15 = bitcast i8* %14 to i64* + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 1) + %17 = bitcast i8* %16 to i64* + store i64 %11, i64* %15, align 4 + store i64 %12, i64* %17, align 4 + %__qsVar4__qubitsPQ__ = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %13, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__qubitsPQ__, i32 1) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 2) + %19 = bitcast i8* %18 to i64* + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %21 = bitcast i8* %20 to i64* + %22 = load i64, i64* %19, align 4 + %23 = load i64, i64* %21, align 4 + %24 = call 
%Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %24, i64 0) + %26 = bitcast i8* %25 to i64* + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %24, i64 1) + %28 = bitcast i8* %27 to i64* + store i64 %22, i64* %26, align 4 + store i64 %23, i64* %28, align 4 + %__qsVar5__qubitsRS__ = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %24, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar5__qubitsRS__, i32 1) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %30 = bitcast i8* %29 to i64* + %31 = load i64, i64* %30, align 4 + %32 = add i64 %31, 1 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %34 = bitcast i8* %33 to i64* + %35 = load i64, i64* %34, align 4 + %36 = sub i64 %35, 1 + %37 = insertvalue %Range zeroinitializer, i64 %32, 0 + %38 = insertvalue %Range %37, i64 1, 1 + %39 = insertvalue %Range %38, i64 %36, 2 + %__qsVar6__qubitsPQJW__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %qubits, %Range %39, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__qubitsPQJW__, i32 1) + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 2) + %41 = bitcast i8* %40 to i64* + %42 = load i64, i64* %41, align 4 + %43 = add i64 %42, 1 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %45 = bitcast i8* %44 to i64* + %46 = load i64, i64* %45, align 4 + %47 = sub i64 %46, 1 + %48 = insertvalue %Range zeroinitializer, i64 %43, 0 + %49 = insertvalue %Range %48, i64 1, 1 + %50 = insertvalue %Range %49, i64 %47, 2 + %__qsVar7__qubitsRSJW__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %qubits, %Range %50, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__qubitsRSJW__, i32 1) + %51 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 0) + %53 = bitcast i8* %52 to i2* + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 1) + %55 = bitcast i8* %54 to i2* + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 2) + %57 = bitcast i8* %56 to i2* + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 3) + %59 = bitcast i8* %58 to i2* + store i2 1, i2* %53, align 1 + store i2 1, i2* %55, align 1 + store i2 1, i2* %57, align 1 + store i2 1, i2* %59, align 1 + %60 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 0) + %62 = bitcast i8* %61 to i2* + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 1) + %64 = bitcast i8* %63 to i2* + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 2) + %66 = bitcast i8* %65 to i2* + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 3) + %68 = bitcast i8* %67 to i2* + store i2 1, i2* %62, align 1 + store i2 1, i2* %64, align 1 + store i2 -1, i2* %66, align 1 + store i2 -1, i2* %68, align 1 + %69 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 0) + %71 = bitcast i8* %70 to i2* + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 1) + %73 = bitcast 
i8* %72 to i2* + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 2) + %75 = bitcast i8* %74 to i2* + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 3) + %77 = bitcast i8* %76 to i2* + store i2 1, i2* %71, align 1 + store i2 -1, i2* %73, align 1 + store i2 1, i2* %75, align 1 + store i2 -1, i2* %77, align 1 + %78 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 0) + %80 = bitcast i8* %79 to i2* + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 1) + %82 = bitcast i8* %81 to i2* + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 2) + %84 = bitcast i8* %83 to i2* + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 3) + %86 = bitcast i8* %85 to i2* + store i2 -1, i2* %80, align 1 + store i2 1, i2* %82, align 1 + store i2 1, i2* %84, align 1 + store i2 -1, i2* %86, align 1 + %87 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 0) + %89 = bitcast i8* %88 to i2* + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 1) + %91 = bitcast i8* %90 to i2* + %92 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 2) + %93 = bitcast i8* %92 to i2* + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 3) + %95 = bitcast i8* %94 to i2* + store i2 -1, i2* %89, align 1 + store i2 -1, i2* %91, align 1 + store i2 -1, i2* %93, align 1 + store i2 -1, i2* %95, align 1 + %96 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 0) + %98 = bitcast i8* %97 to i2* + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 1) + %100 = bitcast i8* %99 to i2* + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 2) + %102 = bitcast i8* %101 to i2* + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 3) + %104 = bitcast i8* %103 to i2* + store i2 -1, i2* %98, align 1 + store i2 -1, i2* %100, align 1 + store i2 1, i2* %102, align 1 + store i2 1, i2* %104, align 1 + %105 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 0) + %107 = bitcast i8* %106 to i2* + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 1) + %109 = bitcast i8* %108 to i2* + %110 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 2) + %111 = bitcast i8* %110 to i2* + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 3) + %113 = bitcast i8* %112 to i2* + store i2 -1, i2* %107, align 1 + store i2 1, i2* %109, align 1 + store i2 -1, i2* %111, align 1 + store i2 1, i2* %113, align 1 + %114 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 0) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 1) + %118 = bitcast i8* %117 to i2* + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 2) + %120 = bitcast i8* %119 to i2* + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 3) + %122 = bitcast i8* %121 to i2* + store i2 1, i2* %116, align 1 + store i2 -1, i2* %118, align 1 + store i2 -1, i2* %120, align 1 + store i2 1, i2* %122, 
align 1 + %__qsVar8__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 0) + %124 = bitcast i8* %123 to %Array** + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 1) + %126 = bitcast i8* %125 to %Array** + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 2) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 3) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 4) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 5) + %134 = bitcast i8* %133 to %Array** + %135 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 6) + %136 = bitcast i8* %135 to %Array** + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 7) + %138 = bitcast i8* %137 to %Array** + store %Array* %51, %Array** %124, align 8 + store %Array* %60, %Array** %126, align 8 + store %Array* %69, %Array** %128, align 8 + store %Array* %78, %Array** %130, align 8 + store %Array* %87, %Array** %132, align 8 + store %Array* %96, %Array** %134, align 8 + store %Array* %105, %Array** %136, align 8 + store %Array* %114, %Array** %138, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %139 = phi i64 [ 0, %entry ], [ %144, %exiting__1 ] + %140 = icmp sle i64 %139, 7 + br i1 %140, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %141 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 %139) + %142 = bitcast i8* %141 to %Array** + %143 = load %Array*, %Array** %142, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %143, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %144 = add i64 %139, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__ops__, i32 1) + %145 = call %Range @Microsoft__Quantum__Arrays___4719fa27e9f8473381154ca8c04d2b3a_IndexRange__body(%Array* %__qsVar8__ops__) + %146 = extractvalue %Range %145, 0 + %147 = extractvalue %Range %145, 1 + %148 = extractvalue %Range %145, 2 + %149 = sub i64 %148, %146 + %150 = sdiv i64 %149, %147 + %151 = mul i64 %147, %150 + %152 = add i64 %146, %151 + %153 = sub i64 0, %147 + %154 = insertvalue %Range zeroinitializer, i64 %152, 0 + %155 = insertvalue %Range %154, i64 %153, 1 + %156 = insertvalue %Range %155, i64 %146, 2 + %157 = extractvalue %Range %156, 0 + %158 = extractvalue %Range %156, 1 + %159 = extractvalue %Range %156, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %160 = icmp sgt i64 %158, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar9__idxOp__ = phi i64 [ %157, %preheader__1 ], [ %182, %exiting__2 ] + %161 = icmp sle i64 %__qsVar9__idxOp__, %159 + %162 = icmp sge i64 %__qsVar9__idxOp__, %159 + %163 = select i1 %160, i1 %161, i1 %162 + br i1 %163, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %164 = srem i64 %__qsVar9__idxOp__, 4 + %165 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__v0123__, i64 %164) + %166 = bitcast i8* %165 to double* + %167 = load double, double* %166, align 8 + %168 = call i1 
@Microsoft__Quantum__Chemistry__IsNotZero__body(double %167) + br i1 %168, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__2 + %169 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 %__qsVar9__idxOp__) + %170 = bitcast i8* %169 to %Array** + %171 = load %Array*, %Array** %170, align 8 + %172 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar6__qubitsPQJW__) + %173 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar7__qubitsRSJW__) + %174 = add i64 %172, %173 + %175 = call %Array* @Microsoft__Quantum__Arrays___041f4b97ff464238a7f7da162bd94e8e_ConstantArray__body(i64 %174, i2 -2) + %paulis = call %Array* @__quantum__rt__array_concatenate(%Array* %171, %Array* %175) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %176 = srem i64 %__qsVar9__idxOp__, 4 + %177 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__v0123__, i64 %176) + %178 = bitcast i8* %177 to double* + %179 = load double, double* %178, align 8 + %theta = fmul double %stepSize, %179 + %180 = call %Array* @__quantum__rt__array_concatenate(%Array* %__qsVar4__qubitsPQ__, %Array* %__qsVar5__qubitsRS__) + call void @__quantum__rt__array_update_reference_count(%Array* %180, i32 1) + %181 = call %Array* @__quantum__rt__array_concatenate(%Array* %180, %Array* %__qsVar6__qubitsPQJW__) + call void @__quantum__rt__array_update_reference_count(%Array* %181, i32 1) + %qubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %181, %Array* %__qsVar7__qubitsRSJW__) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %theta, %Array* %qubits__1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %175, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %180, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %180, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %181, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %181, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__2 + br label %exiting__2 + +exiting__2: ; preds = %continue__1 + %182 = add i64 %__qsVar9__idxOp__, %158 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__v0123__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__v0123__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__qubitsPQ__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar5__qubitsRS__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__qubitsPQJW__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__qubitsRSJW__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %183 = phi i64 [ 0, %exit__2 ], [ %188, %exiting__3 ] + %184 = icmp sle i64 %183, 7 + br i1 %184, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %185 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 %183) + %186 = bitcast i8* %185 to %Array** + %187 = load %Array*, %Array** %186, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %187, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %188 = add i64 %183, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar4__qubitsPQ__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar5__qubitsRS__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar6__qubitsPQJW__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar7__qubitsRSJW__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %189 = phi i64 [ 0, %exit__3 ], [ %194, %exiting__4 ] + %190 = icmp sle i64 %189, 7 + br i1 %190, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %191 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 %189) + %192 = bitcast i8* %191 to %Array** + %193 = load %Array*, %Array** %192, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %193, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %194 = add i64 %189, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar8__ops__, i32 -1) + ret void +} + +declare void @__quantum__qis__exp__adj(%Array*, double, %Array*) + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWigner0123Term_____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + 
%idxTermType = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %v0123 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %v0123, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %v0123, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %12 = bitcast i8* %11 to i64* + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %14 = bitcast i8* %13 to i64* + %15 = load i64, i64* %12, align 4 + %16 = load i64, i64* %14, align 4 + %17 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 0) + %19 = bitcast i8* %18 to i64* + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 1) + %21 = bitcast i8* %20 to i64* + store i64 %15, i64* %19, align 4 + store i64 %16, i64* %21, align 4 + %qubitsPQ = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %17, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsPQ, i32 1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 2) + %23 = bitcast i8* %22 to i64* + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %25 = bitcast i8* %24 to i64* + %26 = load i64, i64* %23, align 4 + %27 = load i64, i64* %25, align 4 + %28 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 0) + %30 = bitcast i8* %29 to i64* + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 1) + %32 = bitcast i8* %31 to i64* + store i64 %26, i64* %30, align 4 + store i64 %27, i64* %32, align 4 + %qubitsRS = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %28, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsRS, i32 1) + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %34 = bitcast i8* %33 to i64* + %35 = load i64, i64* %34, align 4 + %36 = add i64 %35, 1 + %37 = 
call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %38 = bitcast i8* %37 to i64* + %39 = load i64, i64* %38, align 4 + %40 = sub i64 %39, 1 + %41 = insertvalue %Range zeroinitializer, i64 %36, 0 + %42 = insertvalue %Range %41, i64 1, 1 + %43 = insertvalue %Range %42, i64 %40, 2 + %qubitsPQJW = call %Array* @__quantum__rt__array_slice_1d(%Array* %qubits, %Range %43, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsPQJW, i32 1) + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 2) + %45 = bitcast i8* %44 to i64* + %46 = load i64, i64* %45, align 4 + %47 = add i64 %46, 1 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %49 = bitcast i8* %48 to i64* + %50 = load i64, i64* %49, align 4 + %51 = sub i64 %50, 1 + %52 = insertvalue %Range zeroinitializer, i64 %47, 0 + %53 = insertvalue %Range %52, i64 1, 1 + %54 = insertvalue %Range %53, i64 %51, 2 + %qubitsRSJW = call %Array* @__quantum__rt__array_slice_1d(%Array* %qubits, %Range %54, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsRSJW, i32 1) + %55 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 0) + %57 = bitcast i8* %56 to i2* + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 1) + %59 = bitcast i8* %58 to i2* + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 2) + %61 = bitcast i8* %60 to i2* + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 3) + %63 = bitcast i8* %62 to i2* + store i2 1, i2* %57, align 1 + store i2 1, i2* %59, align 1 + store i2 1, i2* %61, align 1 + store i2 1, i2* %63, align 1 + %64 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 0) + %66 = bitcast i8* %65 to i2* + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 1) + %68 = bitcast i8* %67 to i2* + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 2) + %70 = bitcast i8* %69 to i2* + %71 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 3) + %72 = bitcast i8* %71 to i2* + store i2 1, i2* %66, align 1 + store i2 1, i2* %68, align 1 + store i2 -1, i2* %70, align 1 + store i2 -1, i2* %72, align 1 + %73 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 0) + %75 = bitcast i8* %74 to i2* + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 1) + %77 = bitcast i8* %76 to i2* + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 2) + %79 = bitcast i8* %78 to i2* + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 3) + %81 = bitcast i8* %80 to i2* + store i2 1, i2* %75, align 1 + store i2 -1, i2* %77, align 1 + store i2 1, i2* %79, align 1 + store i2 -1, i2* %81, align 1 + %82 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %82, i64 0) + %84 = bitcast i8* %83 to i2* + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %82, i64 1) + %86 = bitcast i8* %85 to i2* + %87 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %82, i64 2) + %88 = bitcast i8* %87 to i2* + %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %82, i64 3) + %90 = bitcast i8* %89 to i2* + 
store i2 -1, i2* %84, align 1 + store i2 1, i2* %86, align 1 + store i2 1, i2* %88, align 1 + store i2 -1, i2* %90, align 1 + %91 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %92 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %91, i64 0) + %93 = bitcast i8* %92 to i2* + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %91, i64 1) + %95 = bitcast i8* %94 to i2* + %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %91, i64 2) + %97 = bitcast i8* %96 to i2* + %98 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %91, i64 3) + %99 = bitcast i8* %98 to i2* + store i2 -1, i2* %93, align 1 + store i2 -1, i2* %95, align 1 + store i2 -1, i2* %97, align 1 + store i2 -1, i2* %99, align 1 + %100 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %100, i64 0) + %102 = bitcast i8* %101 to i2* + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %100, i64 1) + %104 = bitcast i8* %103 to i2* + %105 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %100, i64 2) + %106 = bitcast i8* %105 to i2* + %107 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %100, i64 3) + %108 = bitcast i8* %107 to i2* + store i2 -1, i2* %102, align 1 + store i2 -1, i2* %104, align 1 + store i2 1, i2* %106, align 1 + store i2 1, i2* %108, align 1 + %109 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %110 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %109, i64 0) + %111 = bitcast i8* %110 to i2* + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %109, i64 1) + %113 = bitcast i8* %112 to i2* + %114 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %109, i64 2) + %115 = bitcast i8* %114 to i2* + %116 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %109, i64 3) + %117 = bitcast i8* %116 to i2* + store i2 -1, i2* %111, align 1 + store i2 1, i2* %113, align 1 + store i2 -1, i2* %115, align 1 + store i2 1, i2* %117, align 1 + %118 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %118, i64 0) + %120 = bitcast i8* %119 to i2* + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %118, i64 1) + %122 = bitcast i8* %121 to i2* + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %118, i64 2) + %124 = bitcast i8* %123 to i2* + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %118, i64 3) + %126 = bitcast i8* %125 to i2* + store i2 1, i2* %120, align 1 + store i2 -1, i2* %122, align 1 + store i2 -1, i2* %124, align 1 + store i2 1, i2* %126, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 2) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 3) + %134 = bitcast i8* %133 to %Array** + %135 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 4) + %136 = bitcast i8* %135 to %Array** + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 5) + %138 = bitcast i8* %137 to %Array** + %139 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, 
i64 6) + %140 = bitcast i8* %139 to %Array** + %141 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 7) + %142 = bitcast i8* %141 to %Array** + store %Array* %55, %Array** %128, align 8 + store %Array* %64, %Array** %130, align 8 + store %Array* %73, %Array** %132, align 8 + store %Array* %82, %Array** %134, align 8 + store %Array* %91, %Array** %136, align 8 + store %Array* %100, %Array** %138, align 8 + store %Array* %109, %Array** %140, align 8 + store %Array* %118, %Array** %142, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %143 = phi i64 [ 0, %entry ], [ %148, %exiting__1 ] + %144 = icmp sle i64 %143, 7 + br i1 %144, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %145 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %143) + %146 = bitcast i8* %145 to %Array** + %147 = load %Array*, %Array** %146, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %147, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %148 = add i64 %143, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %149 = call %Range @Microsoft__Quantum__Arrays___4719fa27e9f8473381154ca8c04d2b3a_IndexRange__body(%Array* %ops) + %150 = extractvalue %Range %149, 0 + %151 = extractvalue %Range %149, 1 + %152 = extractvalue %Range %149, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %153 = icmp sgt i64 %151, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idxOp = phi i64 [ %150, %preheader__1 ], [ %180, %exiting__2 ] + %154 = icmp sle i64 %idxOp, %152 + %155 = icmp sge i64 %idxOp, %152 + %156 = select i1 %153, i1 %154, i1 %155 + br i1 %156, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %157 = srem i64 %idxOp, 4 + %158 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %v0123, i64 %157) + %159 = bitcast i8* %158 to double* + %160 = load double, double* %159, align 8 + %161 = call i1 @Microsoft__Quantum__Chemistry__IsNotZero__body(double %160) + br i1 %161, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %162 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %idxOp) + %163 = bitcast i8* %162 to %Array** + %164 = load %Array*, %Array** %163, align 8 + %165 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubitsPQJW) + %166 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubitsRSJW) + %167 = add i64 %165, %166 + %168 = call %Array* @Microsoft__Quantum__Arrays___041f4b97ff464238a7f7da162bd94e8e_ConstantArray__body(i64 %167, i2 -2) + %paulis = call %Array* @__quantum__rt__array_concatenate(%Array* %164, %Array* %168) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %169 = srem i64 %idxOp, 4 + %170 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %v0123, i64 %169) + %171 = bitcast i8* %170 to double* + %172 = load double, double* %171, align 8 + %theta = fmul double %stepSize, %172 + %173 = call %Array* @__quantum__rt__array_concatenate(%Array* %qubitsPQ, %Array* %qubitsRS) + call void @__quantum__rt__array_update_reference_count(%Array* %173, i32 1) + %174 = call %Array* @__quantum__rt__array_concatenate(%Array* %173, %Array* %qubitsPQJW) + call void 
@__quantum__rt__array_update_reference_count(%Array* %174, i32 1) + %qubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %174, %Array* %qubitsRSJW) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + %175 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %176 = bitcast %Tuple* %175 to { %Array*, double, %Array* }* + %177 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %176, i32 0, i32 0 + %178 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %176, i32 0, i32 1 + %179 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %176, i32 0, i32 2 + store %Array* %paulis, %Array** %177, align 8 + store double %theta, double* %178, align 8 + store %Array* %qubits__1, %Array** %179, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %176) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %168, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %173, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %173, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %174, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %174, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %175, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__2 + br label %exiting__2 + +exiting__2: ; preds = %continue__1 + %180 = add i64 %idxOp, %151 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %v0123, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %v0123, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* 
%idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsPQ, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsRS, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsPQJW, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsRSJW, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %181 = phi i64 [ 0, %exit__2 ], [ %186, %exiting__3 ] + %182 = icmp sle i64 %181, 7 + br i1 %182, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %183 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %181) + %184 = bitcast i8* %183 to %Array** + %185 = load %Array*, %Array** %184, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %185, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %186 = add i64 %181, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsPQ, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsRS, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsPQJW, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsRSJW, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %187 = phi i64 [ 0, %exit__3 ], [ %192, %exiting__4 ] + %188 = icmp sle i64 %187, 7 + br i1 %188, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %189 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %187) + %190 = bitcast i8* %189 to %Array** + %191 = load %Array*, %Array** %190, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %191, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %192 = add i64 %187, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + ret void +} + +declare void @__quantum__qis__exp__ctl(%Array*, { %Array*, double, %Array* }*) + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWigner0123Term_____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__v0123__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* 
%__qsVar1__v0123__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__v0123__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %12 = bitcast i8* %11 to i64* + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %14 = bitcast i8* %13 to i64* + %15 = load i64, i64* %12, align 4 + %16 = load i64, i64* %14, align 4 + %17 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 0) + %19 = bitcast i8* %18 to i64* + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 1) + %21 = bitcast i8* %20 to i64* + store i64 %15, i64* %19, align 4 + store i64 %16, i64* %21, align 4 + %__qsVar4__qubitsPQ__ = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %17, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__qubitsPQ__, i32 1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 2) + %23 = bitcast i8* %22 to i64* + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %25 = bitcast i8* %24 to i64* + %26 = load i64, i64* %23, align 4 + %27 = load i64, i64* %25, align 4 + %28 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 0) + %30 = bitcast i8* %29 to i64* + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 1) + %32 = bitcast i8* %31 to i64* + store i64 %26, i64* %30, align 4 + store i64 %27, i64* %32, align 4 + %__qsVar5__qubitsRS__ = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %28, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar5__qubitsRS__, i32 1) + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %34 = bitcast i8* %33 to i64* + %35 = load i64, i64* %34, align 4 + %36 = add i64 %35, 1 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %38 = bitcast i8* %37 to i64* + %39 = load i64, i64* %38, 
align 4 + %40 = sub i64 %39, 1 + %41 = insertvalue %Range zeroinitializer, i64 %36, 0 + %42 = insertvalue %Range %41, i64 1, 1 + %43 = insertvalue %Range %42, i64 %40, 2 + %__qsVar6__qubitsPQJW__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %qubits, %Range %43, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__qubitsPQJW__, i32 1) + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 2) + %45 = bitcast i8* %44 to i64* + %46 = load i64, i64* %45, align 4 + %47 = add i64 %46, 1 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %49 = bitcast i8* %48 to i64* + %50 = load i64, i64* %49, align 4 + %51 = sub i64 %50, 1 + %52 = insertvalue %Range zeroinitializer, i64 %47, 0 + %53 = insertvalue %Range %52, i64 1, 1 + %54 = insertvalue %Range %53, i64 %51, 2 + %__qsVar7__qubitsRSJW__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %qubits, %Range %54, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__qubitsRSJW__, i32 1) + %55 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 0) + %57 = bitcast i8* %56 to i2* + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 1) + %59 = bitcast i8* %58 to i2* + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 2) + %61 = bitcast i8* %60 to i2* + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 3) + %63 = bitcast i8* %62 to i2* + store i2 1, i2* %57, align 1 + store i2 1, i2* %59, align 1 + store i2 1, i2* %61, align 1 + store i2 1, i2* %63, align 1 + %64 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 0) + %66 = bitcast i8* %65 to i2* + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 1) + %68 = bitcast i8* %67 to i2* + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 2) + %70 = bitcast i8* %69 to i2* + %71 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 3) + %72 = bitcast i8* %71 to i2* + store i2 1, i2* %66, align 1 + store i2 1, i2* %68, align 1 + store i2 -1, i2* %70, align 1 + store i2 -1, i2* %72, align 1 + %73 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 0) + %75 = bitcast i8* %74 to i2* + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 1) + %77 = bitcast i8* %76 to i2* + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 2) + %79 = bitcast i8* %78 to i2* + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 3) + %81 = bitcast i8* %80 to i2* + store i2 1, i2* %75, align 1 + store i2 -1, i2* %77, align 1 + store i2 1, i2* %79, align 1 + store i2 -1, i2* %81, align 1 + %82 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %82, i64 0) + %84 = bitcast i8* %83 to i2* + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %82, i64 1) + %86 = bitcast i8* %85 to i2* + %87 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %82, i64 2) + %88 = bitcast i8* %87 to i2* + %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %82, i64 3) + %90 = bitcast i8* %89 to i2* + store i2 -1, i2* %84, align 1 + store i2 1, i2* %86, align 1 + store 
i2 1, i2* %88, align 1 + store i2 -1, i2* %90, align 1 + %91 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %92 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %91, i64 0) + %93 = bitcast i8* %92 to i2* + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %91, i64 1) + %95 = bitcast i8* %94 to i2* + %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %91, i64 2) + %97 = bitcast i8* %96 to i2* + %98 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %91, i64 3) + %99 = bitcast i8* %98 to i2* + store i2 -1, i2* %93, align 1 + store i2 -1, i2* %95, align 1 + store i2 -1, i2* %97, align 1 + store i2 -1, i2* %99, align 1 + %100 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %100, i64 0) + %102 = bitcast i8* %101 to i2* + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %100, i64 1) + %104 = bitcast i8* %103 to i2* + %105 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %100, i64 2) + %106 = bitcast i8* %105 to i2* + %107 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %100, i64 3) + %108 = bitcast i8* %107 to i2* + store i2 -1, i2* %102, align 1 + store i2 -1, i2* %104, align 1 + store i2 1, i2* %106, align 1 + store i2 1, i2* %108, align 1 + %109 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %110 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %109, i64 0) + %111 = bitcast i8* %110 to i2* + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %109, i64 1) + %113 = bitcast i8* %112 to i2* + %114 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %109, i64 2) + %115 = bitcast i8* %114 to i2* + %116 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %109, i64 3) + %117 = bitcast i8* %116 to i2* + store i2 -1, i2* %111, align 1 + store i2 1, i2* %113, align 1 + store i2 -1, i2* %115, align 1 + store i2 1, i2* %117, align 1 + %118 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %118, i64 0) + %120 = bitcast i8* %119 to i2* + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %118, i64 1) + %122 = bitcast i8* %121 to i2* + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %118, i64 2) + %124 = bitcast i8* %123 to i2* + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %118, i64 3) + %126 = bitcast i8* %125 to i2* + store i2 1, i2* %120, align 1 + store i2 -1, i2* %122, align 1 + store i2 -1, i2* %124, align 1 + store i2 1, i2* %126, align 1 + %__qsVar8__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 0) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 1) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 2) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 3) + %134 = bitcast i8* %133 to %Array** + %135 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 4) + %136 = bitcast i8* %135 to %Array** + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 5) + %138 = bitcast i8* %137 to %Array** + %139 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 6) + %140 = bitcast i8* %139 to %Array** + %141 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 7) + %142 = bitcast i8* %141 to %Array** + store %Array* %55, %Array** %128, align 8 + store %Array* %64, %Array** %130, align 8 + store %Array* %73, %Array** %132, align 8 + store %Array* %82, %Array** %134, align 8 + store %Array* %91, %Array** %136, align 8 + store %Array* %100, %Array** %138, align 8 + store %Array* %109, %Array** %140, align 8 + store %Array* %118, %Array** %142, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %143 = phi i64 [ 0, %entry ], [ %148, %exiting__1 ] + %144 = icmp sle i64 %143, 7 + br i1 %144, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %145 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 %143) + %146 = bitcast i8* %145 to %Array** + %147 = load %Array*, %Array** %146, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %147, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %148 = add i64 %143, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__ops__, i32 1) + %149 = call %Range @Microsoft__Quantum__Arrays___4719fa27e9f8473381154ca8c04d2b3a_IndexRange__body(%Array* %__qsVar8__ops__) + %150 = extractvalue %Range %149, 0 + %151 = extractvalue %Range %149, 1 + %152 = extractvalue %Range %149, 2 + %153 = sub i64 %152, %150 + %154 = sdiv i64 %153, %151 + %155 = mul i64 %151, %154 + %156 = add i64 %150, %155 + %157 = sub i64 0, %151 + %158 = insertvalue %Range zeroinitializer, i64 %156, 0 + %159 = insertvalue %Range %158, i64 %157, 1 + %160 = insertvalue %Range %159, i64 %150, 2 + %161 = extractvalue %Range %160, 0 + %162 = extractvalue %Range %160, 1 + %163 = extractvalue %Range %160, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %164 = icmp sgt i64 %162, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar9__idxOp__ = phi i64 [ %161, %preheader__1 ], [ %191, %exiting__2 ] + %165 = icmp sle i64 %__qsVar9__idxOp__, %163 + %166 = icmp sge i64 %__qsVar9__idxOp__, %163 + %167 = select i1 %164, i1 %165, i1 %166 + br i1 %167, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %168 = srem i64 %__qsVar9__idxOp__, 4 + %169 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__v0123__, i64 %168) + %170 = bitcast i8* %169 to double* + %171 = load double, double* %170, align 8 + %172 = call i1 @Microsoft__Quantum__Chemistry__IsNotZero__body(double %171) + br i1 %172, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %173 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 %__qsVar9__idxOp__) + %174 = bitcast i8* %173 to %Array** + %175 = load %Array*, %Array** %174, align 8 + %176 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar6__qubitsPQJW__) + %177 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar7__qubitsRSJW__) + %178 = add i64 %176, %177 + %179 = call %Array* @Microsoft__Quantum__Arrays___041f4b97ff464238a7f7da162bd94e8e_ConstantArray__body(i64 %178, i2 -2) + %paulis = call %Array* @__quantum__rt__array_concatenate(%Array* %175, %Array* %179) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %180 = srem i64 %__qsVar9__idxOp__, 4 + %181 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__v0123__, i64 %180) + %182 = bitcast i8* %181 to double* + %183 = load double, double* %182, align 8 + %theta = fmul double %stepSize, %183 + %184 = call %Array* @__quantum__rt__array_concatenate(%Array* %__qsVar4__qubitsPQ__, %Array* %__qsVar5__qubitsRS__) + call void @__quantum__rt__array_update_reference_count(%Array* %184, i32 1) + %185 = call %Array* @__quantum__rt__array_concatenate(%Array* %184, %Array* %__qsVar6__qubitsPQJW__) + call void @__quantum__rt__array_update_reference_count(%Array* %185, i32 1) + %qubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %185, %Array* %__qsVar7__qubitsRSJW__) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + %186 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %187 = bitcast %Tuple* %186 to { %Array*, double, %Array* }* + %188 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %187, i32 0, i32 0 + %189 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %187, i32 0, i32 1 + %190 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %187, i32 0, i32 2 + store %Array* %paulis, %Array** %188, align 8 + store double %theta, double* %189, align 8 + store %Array* %qubits__1, %Array** %190, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %187) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %179, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %184, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %184, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %185, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %185, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %186, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__2 + br label %exiting__2 + +exiting__2: ; preds = %continue__1 + %191 = add i64 %__qsVar9__idxOp__, %162 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__v0123__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__v0123__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__qubitsPQ__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar5__qubitsRS__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__qubitsPQJW__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__qubitsRSJW__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %192 = phi i64 [ 0, %exit__2 ], [ %197, %exiting__3 ] + %193 = icmp sle i64 %192, 7 + br i1 %193, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %194 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 %192) + %195 = bitcast i8* %194 to %Array** + %196 = load %Array*, %Array** %195, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %196, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %197 = add i64 %192, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar4__qubitsPQ__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar5__qubitsRS__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar6__qubitsPQJW__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar7__qubitsRSJW__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %198 = phi i64 [ 0, %exit__3 ], [ %203, %exiting__4 ] + %199 = icmp sle i64 %198, 7 + br i1 %199, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %200 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar8__ops__, i64 %198) + %201 = bitcast i8* %200 to %Array** + %202 = load %Array*, %Array** %201, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %202, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %203 = add i64 %198, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar8__ops__, i32 -1) + ret void +} + +declare void @__quantum__qis__exp__ctladj(%Array*, { %Array*, double, %Array* }*) + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWignerClusterOperatorPQRSTerm____body({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + 
%1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %coeff = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %8 = bitcast i8* %7 to i64* + %p = load i64, i64* %8, align 4 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %10 = bitcast i8* %9 to i64* + %q = load i64, i64* %10, align 4 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 2) + %12 = bitcast i8* %11 to i64* + %r = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %14 = bitcast i8* %13 to i64* + %s = load i64, i64* %14, align 4 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %16 = bitcast i8* %15 to double* + %17 = load double, double* %16, align 8 + %18 = fmul double 1.250000e-01, %17 + %angle = fmul double %18, %stepSize + %19 = icmp eq i64 %p, %q + br i1 %19, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %entry + %20 = icmp eq i64 %p, %r + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %entry + %21 = phi i1 [ %19, %entry ], [ %20, %condFalse__1 ] + br i1 %21, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %condContinue__1 + %22 = icmp eq i64 %p, %s + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condContinue__1 + %23 = phi i1 [ %21, %condContinue__1 ], [ %22, %condFalse__2 ] + br i1 %23, label %condContinue__3, label %condFalse__3 + +condFalse__3: ; preds = %condContinue__2 + %24 = icmp eq i64 %q, %r + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condContinue__2 + %25 = phi i1 [ %23, %condContinue__2 ], [ %24, %condFalse__3 ] + br i1 %25, label %condContinue__4, label %condFalse__4 + +condFalse__4: ; preds = %condContinue__3 + %26 = icmp eq i64 %q, %s + br label %condContinue__4 + +condContinue__4: ; preds = %condFalse__4, %condContinue__3 + %27 = phi i1 [ %25, %condContinue__3 ], [ %26, %condFalse__4 ] + br i1 %27, label %condContinue__5, label %condFalse__5 + +condFalse__5: ; preds = %condContinue__4 + %28 = icmp eq i64 %r, %s + br label %condContinue__5 + +condContinue__5: ; preds = %condFalse__5, %condContinue__4 + %29 = phi i1 [ %27, 
%condContinue__4 ], [ %28, %condFalse__5 ] + br i1 %29, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__5 + %30 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @0, i32 0, i32 0)) + %31 = call %String* @__quantum__rt__int_to_string(i64 %p) + %32 = call %String* @__quantum__rt__string_concatenate(%String* %30, %String* %31) + call void @__quantum__rt__string_update_reference_count(%String* %30, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %31, i32 -1) + %33 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i32 0, i32 0)) + %34 = call %String* @__quantum__rt__string_concatenate(%String* %32, %String* %33) + call void @__quantum__rt__string_update_reference_count(%String* %32, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %33, i32 -1) + %35 = call %String* @__quantum__rt__int_to_string(i64 %q) + %36 = call %String* @__quantum__rt__string_concatenate(%String* %34, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i32 0, i32 0)) + %38 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %37) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %37, i32 -1) + %39 = call %String* @__quantum__rt__int_to_string(i64 %r) + %40 = call %String* @__quantum__rt__string_concatenate(%String* %38, %String* %39) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i32 0, i32 0)) + %42 = call %String* @__quantum__rt__string_concatenate(%String* %40, %String* %41) + call void @__quantum__rt__string_update_reference_count(%String* %40, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %41, i32 -1) + %43 = call %String* @__quantum__rt__int_to_string(i64 %s) + %44 = call %String* @__quantum__rt__string_concatenate(%String* %42, %String* %43) + call void @__quantum__rt__string_update_reference_count(%String* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %43, i32 -1) + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @2, i32 0, i32 0)) + %46 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 
-1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %46) + unreachable + +continue__1: ; preds = %condContinue__5 + %47 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 0) + %49 = bitcast i8* %48 to i2* + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 1) + %51 = bitcast i8* %50 to i2* + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 2) + %53 = bitcast i8* %52 to i2* + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 3) + %55 = bitcast i8* %54 to i2* + store i2 -1, i2* %49, align 1 + store i2 -1, i2* %51, align 1 + store i2 1, i2* %53, align 1 + store i2 -1, i2* %55, align 1 + %56 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 0) + %58 = bitcast i8* %57 to i2* + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 1) + %60 = bitcast i8* %59 to i2* + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 2) + %62 = bitcast i8* %61 to i2* + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 3) + %64 = bitcast i8* %63 to i2* + store i2 1, i2* %58, align 1 + store i2 1, i2* %60, align 1 + store i2 1, i2* %62, align 1 + store i2 -1, i2* %64, align 1 + %65 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 0) + %67 = bitcast i8* %66 to i2* + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 1) + %69 = bitcast i8* %68 to i2* + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 2) + %71 = bitcast i8* %70 to i2* + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 3) + %73 = bitcast i8* %72 to i2* + store i2 1, i2* %67, align 1 + store i2 -1, i2* %69, align 1 + store i2 -1, i2* %71, align 1 + store i2 -1, i2* %73, align 1 + %74 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 0) + %76 = bitcast i8* %75 to i2* + %77 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 1) + %78 = bitcast i8* %77 to i2* + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 2) + %80 = bitcast i8* %79 to i2* + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 3) + %82 = bitcast i8* %81 to i2* + store i2 -1, i2* %76, align 1 + store i2 1, i2* %78, align 1 + store i2 -1, i2* %80, align 1 + store i2 -1, i2* %82, align 1 + %83 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 0) + %85 = bitcast i8* %84 to i2* + %86 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 1) + %87 = bitcast i8* %86 to i2* + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 2) + %89 = bitcast i8* %88 to i2* + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 3) + %91 = bitcast i8* %90 to i2* + store i2 1, i2* %85, align 1 + store i2 -1, i2* %87, align 1 + store i2 1, i2* %89, align 1 + store i2 1, i2* %91, align 1 + %92 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 0) + %94 = bitcast i8* %93 to i2* + %95 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 1) + %96 = bitcast i8* %95 to i2* + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 2) + %98 = bitcast i8* %97 to i2* + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 3) + %100 = bitcast i8* %99 to i2* + store i2 -1, i2* %94, align 1 + store i2 1, i2* %96, align 1 + store i2 1, i2* %98, align 1 + store i2 1, i2* %100, align 1 + %101 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 0) + %103 = bitcast i8* %102 to i2* + %104 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 1) + %105 = bitcast i8* %104 to i2* + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 2) + %107 = bitcast i8* %106 to i2* + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 3) + %109 = bitcast i8* %108 to i2* + store i2 -1, i2* %103, align 1 + store i2 -1, i2* %105, align 1 + store i2 -1, i2* %107, align 1 + store i2 1, i2* %109, align 1 + %110 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %111 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 0) + %112 = bitcast i8* %111 to i2* + %113 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 1) + %114 = bitcast i8* %113 to i2* + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 2) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 3) + %118 = bitcast i8* %117 to i2* + store i2 1, i2* %112, align 1 + store i2 1, i2* %114, align 1 + store i2 -1, i2* %116, align 1 + store i2 1, i2* %118, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %120 = bitcast i8* %119 to %Array** + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %122 = bitcast i8* %121 to %Array** + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 2) + %124 = bitcast i8* %123 to %Array** + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 3) + %126 = bitcast i8* %125 to %Array** + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 4) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 5) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 6) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 7) + %134 = bitcast i8* %133 to %Array** + store %Array* %47, %Array** %120, align 8 + store %Array* %56, %Array** %122, align 8 + store %Array* %65, %Array** %124, align 8 + store %Array* %74, %Array** %126, align 8 + store %Array* %83, %Array** %128, align 8 + store %Array* %92, %Array** %130, align 8 + store %Array* %101, %Array** %132, align 8 + store %Array* %110, %Array** %134, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %135 = phi i64 [ 0, %continue__1 ], [ %140, %exiting__1 ] + %136 = icmp sle i64 %135, 7 + br i1 %136, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %135) + %138 = bitcast i8* %137 to %Array** + %139 = load %Array*, %Array** %138, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %139, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %140 = add i64 %135, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %141 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 0) + %143 = bitcast i8* %142 to i64* + %144 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 1) + %145 = bitcast i8* %144 to i64* + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 2) + %147 = bitcast i8* %146 to i64* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 3) + %149 = bitcast i8* %148 to i64* + store i64 %p, i64* %143, align 4 + store i64 %q, i64* %145, align 4 + store i64 %r, i64* %147, align 4 + store i64 %s, i64* %149, align 4 + %150 = call { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %141) + %151 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 0 + %sortedIndices = load %Array*, %Array** %151, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %sortedIndices, i32 1) + %152 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 1 + %signs = load %Array*, %Array** %152, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 1) + %153 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 2 + %globalSign = load double, double* %153, align 8 + %154 = call %Array* @Microsoft__Quantum__Arrays___3d2e1155dc4b4f3ab6fe59d3ebd7442c_Zipped__body(%Array* %ops, %Array* %signs) + %155 = call i64 @__quantum__rt__array_get_size_1d(%Array* %154) + %156 = sub i64 %155, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %157 = phi i64 [ 0, %exit__1 ], [ %166, %exiting__2 ] + %158 = icmp sle i64 %157, %156 + br i1 %158, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %159 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %154, i64 %157) + %160 = bitcast i8* %159 to { %Array*, double }** + %161 = load { %Array*, double }*, { %Array*, double }** %160, align 8 + %162 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %161, i32 0, i32 0 + %op = load %Array*, %Array** %162, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %163 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %161, i32 0, i32 1 + %sign = load double, double* %163, align 8 + %164 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %pauliString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %164, %Array* %sortedIndices, %Array* %op) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + %165 = fmul double %globalSign, %sign + %theta = fmul double %165, %angle + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %pauliString, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call 
void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %166 = add i64 %157, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %167 = phi i64 [ 0, %exit__2 ], [ %172, %exiting__3 ] + %168 = icmp sle i64 %167, 7 + br i1 %168, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %169 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %167) + %170 = bitcast i8* %169 to %Array** + %171 = load %Array*, %Array** %170, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %171, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %172 = add i64 %167, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %sortedIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %173 = phi i64 [ 0, %exit__3 ], [ %178, %exiting__4 ] + %174 = icmp sle i64 %173, 7 + br i1 %174, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %175 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %173) + %176 = bitcast i8* %175 to %Array** + %177 = load %Array*, %Array** %176, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %177, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %178 = add i64 %173, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %141, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %sortedIndices, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %signs, i32 -1) + %179 = bitcast { %Array*, %Array*, double }* %150 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %179, i32 -1) + %180 = sub i64 %155, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %181 = phi i64 [ 0, %exit__4 ], [ %189, %exiting__5 ] + %182 = icmp sle i64 %181, %180 + br i1 %182, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %183 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %154, i64 %181) + %184 = bitcast i8* %183 to { %Array*, double }** + %185 = load { %Array*, double }*, { %Array*, double }** %184, align 8 + %186 = getelementptr inbounds { %Array*, double }, { %Array*, double 
}* %185, i32 0, i32 0 + %187 = load %Array*, %Array** %186, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %187, i32 -1) + %188 = bitcast { %Array*, double }* %185 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %188, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %189 = add i64 %181, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %154, i32 -1) + ret void +} + +declare %String* @__quantum__rt__string_create(i8*) + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) + +declare %String* @__quantum__rt__int_to_string(i64) + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) + +declare void @__quantum__rt__fail(%String*) + +define internal { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %indices) { +entry: + %sign = alloca double, align 8 + %signs = alloca %Array*, align 8 + %sorted = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %1 = bitcast i8* %0 to i64* + %p = load i64, i64* %1, align 4 + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 1) + %3 = bitcast i8* %2 to i64* + %q = load i64, i64* %3, align 4 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 2) + %5 = bitcast i8* %4 to i64* + %r = load i64, i64* %5, align 4 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 3) + %7 = bitcast i8* %6 to i64* + %s = load i64, i64* %7, align 4 + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 0) + %10 = bitcast i8* %9 to i64* + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 1) + %12 = bitcast i8* %11 to i64* + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 2) + %14 = bitcast i8* %13 to i64* + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 3) + %16 = bitcast i8* %15 to i64* + store i64 0, i64* %10, align 4 + store i64 0, i64* %12, align 4 + store i64 0, i64* %14, align 4 + store i64 0, i64* %16, align 4 + store %Array* %8, %Array** %sorted, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %17 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 0) + %19 = bitcast i8* %18 to double* + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 1) + %21 = bitcast i8* %20 to double* + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 2) + %23 = bitcast i8* %22 to double* + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 3) + %25 = bitcast i8* %24 to double* + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 4) + %27 = bitcast i8* %26 to double* + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 5) + %29 = bitcast i8* %28 to double* + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 6) + %31 = bitcast i8* %30 to double* + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 7) + %33 = bitcast i8* %32 to double* + store double 0.000000e+00, double* %19, align 8 + store double 
0.000000e+00, double* %21, align 8 + store double 0.000000e+00, double* %23, align 8 + store double 0.000000e+00, double* %25, align 8 + store double 0.000000e+00, double* %27, align 8 + store double 0.000000e+00, double* %29, align 8 + store double 0.000000e+00, double* %31, align 8 + store double 0.000000e+00, double* %33, align 8 + store %Array* %17, %Array** %signs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + store double 1.000000e+00, double* %sign, align 8 + %34 = icmp sgt i64 %p, %q + br i1 %34, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + store double -1.000000e+00, double* %sign, align 8 + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + %35 = icmp sgt i64 %r, %s + br i1 %35, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + %36 = load double, double* %sign, align 8 + %37 = fmul double %36, -1.000000e+00 + store double %37, double* %sign, align 8 + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 + %38 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 0) + %40 = bitcast i8* %39 to i64* + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 1) + %42 = bitcast i8* %41 to i64* + store i64 %p, i64* %40, align 4 + store i64 %q, i64* %42, align 4 + %43 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %38) + %44 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 0) + %46 = bitcast i8* %45 to i64* + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 1) + %48 = bitcast i8* %47 to i64* + store i64 %r, i64* %46, align 4 + store i64 %s, i64* %48, align 4 + %49 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %44) + %50 = icmp sgt i64 %43, %49 + call void @__quantum__rt__array_update_reference_count(%Array* %38, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 -1) + br i1 %50, label %then0__3, label %else__1 + +then0__3: ; preds = %continue__2 + %51 = load double, double* %sign, align 8 + %52 = fmul double %51, -1.000000e+00 + store double %52, double* %sign, align 8 + %53 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 0) + %55 = bitcast i8* %54 to i64* + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 1) + %57 = bitcast i8* %56 to i64* + store i64 %r, i64* %55, align 4 + store i64 %s, i64* %57, align 4 + %58 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %53) + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1) + %59 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %59, i64 0) + %61 = bitcast i8* %60 to i64* + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %59, i64 1) + %63 = bitcast i8* %62 to i64* + store i64 %r, i64* %61, align 4 + store i64 %s, i64* %63, align 4 + %64 = call i64 @Microsoft__Quantum__Math__Max__body(%Array* %59) + call void @__quantum__rt__array_update_reference_count(%Array* %59, i32 -1) + %65 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 0) + %67 = bitcast i8* %66 to i64* + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 1) + %69 = bitcast 
i8* %68 to i64* + store i64 %p, i64* %67, align 4 + store i64 %q, i64* %69, align 4 + %70 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %65) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + %71 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %71, i64 0) + %73 = bitcast i8* %72 to i64* + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %71, i64 1) + %75 = bitcast i8* %74 to i64* + store i64 %p, i64* %73, align 4 + store i64 %q, i64* %75, align 4 + %76 = call i64 @Microsoft__Quantum__Math__Max__body(%Array* %71) + call void @__quantum__rt__array_update_reference_count(%Array* %71, i32 -1) + %77 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 0) + %79 = bitcast i8* %78 to i64* + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 1) + %81 = bitcast i8* %80 to i64* + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 2) + %83 = bitcast i8* %82 to i64* + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 3) + %85 = bitcast i8* %84 to i64* + store i64 %58, i64* %79, align 4 + store i64 %64, i64* %81, align 4 + store i64 %70, i64* %83, align 4 + store i64 %76, i64* %85, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %77, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 -1) + store %Array* %77, %Array** %sorted, align 8 + br label %continue__3 + +else__1: ; preds = %continue__2 + %86 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %87 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %86, i64 0) + %88 = bitcast i8* %87 to i64* + %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %86, i64 1) + %90 = bitcast i8* %89 to i64* + store i64 %p, i64* %88, align 4 + store i64 %q, i64* %90, align 4 + %91 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %86) + call void @__quantum__rt__array_update_reference_count(%Array* %86, i32 -1) + %92 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 0) + %94 = bitcast i8* %93 to i64* + %95 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 1) + %96 = bitcast i8* %95 to i64* + store i64 %p, i64* %94, align 4 + store i64 %q, i64* %96, align 4 + %97 = call i64 @Microsoft__Quantum__Math__Max__body(%Array* %92) + call void @__quantum__rt__array_update_reference_count(%Array* %92, i32 -1) + %98 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %98, i64 0) + %100 = bitcast i8* %99 to i64* + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %98, i64 1) + %102 = bitcast i8* %101 to i64* + store i64 %r, i64* %100, align 4 + store i64 %s, i64* %102, align 4 + %103 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %98) + call void @__quantum__rt__array_update_reference_count(%Array* %98, i32 -1) + %104 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %105 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %104, i64 0) + %106 = bitcast i8* %105 to i64* + %107 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %104, i64 1) + %108 = bitcast i8* %107 to i64* + store i64 %r, i64* %106, align 4 + store i64 
%s, i64* %108, align 4 + %109 = call i64 @Microsoft__Quantum__Math__Max__body(%Array* %104) + call void @__quantum__rt__array_update_reference_count(%Array* %104, i32 -1) + %110 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %111 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 0) + %112 = bitcast i8* %111 to i64* + %113 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 1) + %114 = bitcast i8* %113 to i64* + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 2) + %116 = bitcast i8* %115 to i64* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 3) + %118 = bitcast i8* %117 to i64* + store i64 %91, i64* %112, align 4 + store i64 %97, i64* %114, align 4 + store i64 %103, i64* %116, align 4 + store i64 %109, i64* %118, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %110, i32 1) + %119 = load %Array*, %Array** %sorted, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %119, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %119, i32 -1) + store %Array* %110, %Array** %sorted, align 8 + br label %continue__3 + +continue__3: ; preds = %else__1, %then0__3 + %120 = load %Array*, %Array** %sorted, align 8 + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %120, i64 0) + %122 = bitcast i8* %121 to i64* + %p1 = load i64, i64* %122, align 4 + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %120, i64 1) + %124 = bitcast i8* %123 to i64* + %q1 = load i64, i64* %124, align 4 + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %120, i64 2) + %126 = bitcast i8* %125 to i64* + %r1 = load i64, i64* %126, align 4 + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %120, i64 3) + %128 = bitcast i8* %127 to i64* + %s1 = load i64, i64* %128, align 4 + %129 = icmp slt i64 %q1, %r1 + br i1 %129, label %then0__4, label %test1__1 + +then0__4: ; preds = %continue__3 + %130 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %130, i64 0) + %132 = bitcast i8* %131 to i64* + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %130, i64 1) + %134 = bitcast i8* %133 to i64* + %135 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %130, i64 2) + %136 = bitcast i8* %135 to i64* + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %130, i64 3) + %138 = bitcast i8* %137 to i64* + store i64 %p1, i64* %132, align 4 + store i64 %q1, i64* %134, align 4 + store i64 %r1, i64* %136, align 4 + store i64 %s1, i64* %138, align 4 + %139 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %140 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 0) + %141 = bitcast i8* %140 to double* + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 1) + %143 = bitcast i8* %142 to double* + %144 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 2) + %145 = bitcast i8* %144 to double* + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 3) + %147 = bitcast i8* %146 to double* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 4) + %149 = bitcast i8* %148 to double* + %150 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 5) + %151 = bitcast i8* %150 to double* + %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 6) + %153 = bitcast i8* %152 to 
double* + %154 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 7) + %155 = bitcast i8* %154 to double* + store double 1.000000e+00, double* %141, align 8 + store double -1.000000e+00, double* %143, align 8 + store double -1.000000e+00, double* %145, align 8 + store double -1.000000e+00, double* %147, align 8 + store double 1.000000e+00, double* %149, align 8 + store double 1.000000e+00, double* %151, align 8 + store double 1.000000e+00, double* %153, align 8 + store double -1.000000e+00, double* %155, align 8 + %156 = load double, double* %sign, align 8 + %157 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, double }* getelementptr ({ %Array*, %Array*, double }, { %Array*, %Array*, double }* null, i32 1) to i64)) + %158 = bitcast %Tuple* %157 to { %Array*, %Array*, double }* + %159 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %158, i32 0, i32 0 + %160 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %158, i32 0, i32 1 + %161 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %158, i32 0, i32 2 + store %Array* %130, %Array** %159, align 8 + store %Array* %139, %Array** %160, align 8 + store double %156, double* %161, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + ret { %Array*, %Array*, double }* %158 + +test1__1: ; preds = %continue__3 + %162 = icmp sgt i64 %q1, %r1 + br i1 %162, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %test1__1 + %163 = icmp slt i64 %q1, %s1 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %test1__1 + %164 = phi i1 [ %163, %condTrue__1 ], [ %162, %test1__1 ] + br i1 %164, label %then1__1, label %test2__1 + +then1__1: ; preds = %condContinue__1 + %165 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %166 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 0) + %167 = bitcast i8* %166 to i64* + %168 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 1) + %169 = bitcast i8* %168 to i64* + %170 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 2) + %171 = bitcast i8* %170 to i64* + %172 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 3) + %173 = bitcast i8* %172 to i64* + store i64 %p1, i64* %167, align 4 + store i64 %r1, i64* %169, align 4 + store i64 %q1, i64* %171, align 4 + store i64 %s1, i64* %173, align 4 + %174 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %175 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 0) + %176 = bitcast i8* %175 to double* + %177 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 1) + %178 = bitcast i8* %177 to double* + %179 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 2) + %180 = bitcast i8* %179 to double* + %181 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 3) + %182 = bitcast i8* %181 to double* + %183 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 4) + %184 = bitcast i8* %183 to double* + %185 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 5) + 
%186 = bitcast i8* %185 to double* + %187 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 6) + %188 = bitcast i8* %187 to double* + %189 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 7) + %190 = bitcast i8* %189 to double* + store double -1.000000e+00, double* %176, align 8 + store double -1.000000e+00, double* %178, align 8 + store double -1.000000e+00, double* %180, align 8 + store double 1.000000e+00, double* %182, align 8 + store double -1.000000e+00, double* %184, align 8 + store double 1.000000e+00, double* %186, align 8 + store double 1.000000e+00, double* %188, align 8 + store double 1.000000e+00, double* %190, align 8 + %191 = load double, double* %sign, align 8 + %192 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, double }* getelementptr ({ %Array*, %Array*, double }, { %Array*, %Array*, double }* null, i32 1) to i64)) + %193 = bitcast %Tuple* %192 to { %Array*, %Array*, double }* + %194 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %193, i32 0, i32 0 + %195 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %193, i32 0, i32 1 + %196 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %193, i32 0, i32 2 + store %Array* %165, %Array** %194, align 8 + store %Array* %174, %Array** %195, align 8 + store double %191, double* %196, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + ret { %Array*, %Array*, double }* %193 + +test2__1: ; preds = %condContinue__1 + %197 = icmp sgt i64 %q1, %r1 + br i1 %197, label %condTrue__2, label %condContinue__2 + +condTrue__2: ; preds = %test2__1 + %198 = icmp sgt i64 %q1, %s1 + br label %condContinue__2 + +condContinue__2: ; preds = %condTrue__2, %test2__1 + %199 = phi i1 [ %198, %condTrue__2 ], [ %197, %test2__1 ] + br i1 %199, label %then2__1, label %else__2 + +then2__1: ; preds = %condContinue__2 + %200 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %201 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 0) + %202 = bitcast i8* %201 to i64* + %203 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 1) + %204 = bitcast i8* %203 to i64* + %205 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 2) + %206 = bitcast i8* %205 to i64* + %207 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 3) + %208 = bitcast i8* %207 to i64* + store i64 %p1, i64* %202, align 4 + store i64 %r1, i64* %204, align 4 + store i64 %s1, i64* %206, align 4 + store i64 %q1, i64* %208, align 4 + %209 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %210 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 0) + %211 = bitcast i8* %210 to double* + %212 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 1) + %213 = bitcast i8* %212 to double* + %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 2) + %215 = bitcast i8* %214 to double* + %216 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 3) + %217 = bitcast i8* %216 to double* + %218 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 4) + %219 = bitcast i8* %218 to double* + %220 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 5) + %221 = bitcast i8* %220 to double* + %222 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 6) + %223 = bitcast i8* %222 to double* + %224 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 7) + %225 = bitcast i8* %224 to double* + store double 1.000000e+00, double* %211, align 8 + store double 1.000000e+00, double* %213, align 8 + store double -1.000000e+00, double* %215, align 8 + store double 1.000000e+00, double* %217, align 8 + store double -1.000000e+00, double* %219, align 8 + store double 1.000000e+00, double* %221, align 8 + store double -1.000000e+00, double* %223, align 8 + store double -1.000000e+00, double* %225, align 8 + %226 = load double, double* %sign, align 8 + %227 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, double }* getelementptr ({ %Array*, %Array*, double }, { %Array*, %Array*, double }* null, i32 1) to i64)) + %228 = bitcast %Tuple* %227 to { %Array*, %Array*, double }* + %229 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %228, i32 0, i32 0 + %230 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %228, i32 0, i32 1 + %231 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %228, i32 0, i32 2 + store %Array* %200, %Array** %229, align 8 + store %Array* %209, %Array** %230, align 8 + store double %226, double* %231, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + ret { %Array*, %Array*, double }* %228 + +else__2: ; preds = %condContinue__2 + %232 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @7, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__fail(%String* %232) + unreachable + +continue__4: ; No predecessors! 
+ unreachable +} + +define internal %Array* @Microsoft__Quantum__Arrays___3d2e1155dc4b4f3ab6fe59d3ebd7442c_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %9 = icmp slt i64 %0, %8 + br i1 %9, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %8, %condFalse__1 ] + %10 = icmp eq i64 %nElements, 0 + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %12 = sub i64 %0, 1 + br label %header__2 + +continue__1: ; preds = %condContinue__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %14 = bitcast i8* %13 to %Array** + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %17 = bitcast i8* %16 to double* + %18 = load double, double* %17, align 8 + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double }* getelementptr ({ %Array*, double }, { %Array*, double }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { %Array*, double }* + %21 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %20, i32 0, i32 1 + store %Array* %15, %Array** %21, align 8 + store double %18, double* %22, align 8 + %23 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %24 = sub i64 %nElements, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %25 = phi i64 [ 0, %then0__1 ], [ %30, %exiting__2 ] + %26 = icmp sle i64 %25, %12 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %25) + %28 = bitcast i8* %27 to %Array** + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %11 + +header__3: ; preds = %exiting__3, %continue__1 + %31 = phi i64 [ 0, %continue__1 ], [ %36, %exiting__3 ] + 
%32 = icmp sle i64 %31, %24 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %31) + %34 = bitcast i8* %33 to { %Array*, double }** + store { %Array*, double }* %20, { %Array*, double }** %34, align 8 + %35 = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %35, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %36 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %23, %Array** %output, align 8 + %37 = sub i64 %nElements, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %38 = phi i64 [ 0, %exit__3 ], [ %46, %exiting__4 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %38) + %41 = bitcast i8* %40 to { %Array*, double }** + %42 = load { %Array*, double }*, { %Array*, double }** %41, align 8 + %43 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %42, i32 0, i32 0 + %44 = load %Array*, %Array** %43, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 1) + %45 = bitcast { %Array*, double }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %46 = add i64 %38, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %47 = sub i64 %nElements, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idxElement = phi i64 [ 1, %exit__4 ], [ %67, %exiting__5 ] + %48 = icmp sle i64 %idxElement, %47 + br i1 %48, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + %50 = call %Array* @__quantum__rt__array_copy(%Array* %49, i1 false) + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %52 = bitcast i8* %51 to %Array** + %53 = load %Array*, %Array** %52, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 1) + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %55 = bitcast i8* %54 to double* + %56 = load double, double* %55, align 8 + %57 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double }* getelementptr ({ %Array*, double }, { %Array*, double }* null, i32 1) to i64)) + %58 = bitcast %Tuple* %57 to { %Array*, double }* + %59 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %58, i32 0, i32 0 + %60 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %58, i32 0, i32 1 + store %Array* %53, %Array** %59, align 8 + store double %56, double* %60, align 8 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 %idxElement) + %62 = bitcast i8* %61 to { %Array*, double }** + call void @__quantum__rt__array_update_alias_count(%Array* %53, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 1) + %63 = load { %Array*, double }*, { %Array*, double }** %62, align 8 + %64 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %63, i32 0, i32 0 + %65 = load %Array*, %Array** %64, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %65, i32 -1) + %66 = bitcast { %Array*, double }* %63 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %66, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %66, i32 -1) + store { %Array*, double }* %58, { %Array*, double }** %62, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 1) + store %Array* %50, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %67 = add i64 %idxElement, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %68 = load %Array*, %Array** %output, align 8 + %69 = load %Array*, %Array** %21, align 8 + %70 = sub i64 %0, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %71 = phi i64 [ 0, %exit__5 ], [ %76, %exiting__6 ] + %72 = icmp sle i64 %71, %70 + br i1 %72, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %73 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %71) + %74 = bitcast i8* %73 to %Array** + %75 = load %Array*, %Array** %74, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %75, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %76 = add i64 %71, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %77 = call i64 @__quantum__rt__array_get_size_1d(%Array* %68) + %78 = sub i64 %77, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %79 = phi i64 [ 0, %exit__6 ], [ %87, %exiting__7 ] + %80 = icmp sle i64 %79, %78 + br i1 %80, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %68, i64 %79) + %82 = bitcast i8* %81 to { %Array*, double }** + %83 = load { %Array*, double }*, { %Array*, double }** %82, align 8 + %84 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %83, i32 0, i32 0 + %85 = load %Array*, %Array** %84, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %85, i32 -1) + %86 = bitcast { %Array*, double }* %83 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %87 = add i64 %79, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %68, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + ret %Array* %68 +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %nFermions, %Array* %idxFermions, %Array* %pauliReplacements) { +entry: + %pauliString = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliReplacements, i32 1) + %0 = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliZString__body(i64 %nFermions, %Array* %idxFermions) + store %Array* %0, %Array** %pauliString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 1) + %1 = call %Range 
@Microsoft__Quantum__Arrays___f18da7cbe4e940478813d7485ea738db_IndexRange__body(%Array* %idxFermions) + %2 = extractvalue %Range %1, 0 + %3 = extractvalue %Range %1, 1 + %4 = extractvalue %Range %1, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %5 = icmp sgt i64 %3, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idx = phi i64 [ %2, %preheader__1 ], [ %17, %exiting__1 ] + %6 = icmp sle i64 %idx, %4 + %7 = icmp sge i64 %idx, %4 + %8 = select i1 %5, i1 %6, i1 %7 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 %idx) + %10 = bitcast i8* %9 to i64* + %idxFermion = load i64, i64* %10, align 4 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %pauliReplacements, i64 %idx) + %12 = bitcast i8* %11 to i2* + %op = load i2, i2* %12, align 1 + %13 = load %Array*, %Array** %pauliString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 -1) + %14 = call %Array* @__quantum__rt__array_copy(%Array* %13, i1 false) + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %14, i64 %idxFermion) + %16 = bitcast i8* %15 to i2* + store i2 %op, i2* %16, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + store %Array* %14, %Array** %pauliString, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %idx, %3 + br label %header__1 + +exit__1: ; preds = %header__1 + %18 = load %Array*, %Array** %pauliString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliReplacements, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 -1) + ret %Array* %18 +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWignerClusterOperatorPQRSTerm____adj({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %8 = bitcast i8* %7 to i64* + %__qsVar3__p__ = load i64, i64* %8, align 4 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %10 = bitcast i8* %9 to i64* + %__qsVar4__q__ = load i64, i64* %10, align 4 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 2) + %12 = bitcast i8* %11 to i64* + %__qsVar5__r__ = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %14 = bitcast i8* %13 to i64* + %__qsVar6__s__ = load i64, i64* %14, align 4 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %16 = bitcast i8* %15 to double* + %17 = load double, double* %16, align 8 + %18 = fmul double 1.250000e-01, %17 + %__qsVar7__angle__ = fmul double %18, %stepSize + %19 = icmp eq i64 %__qsVar3__p__, %__qsVar4__q__ + br i1 %19, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %entry + %20 = icmp eq i64 %__qsVar3__p__, %__qsVar5__r__ + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %entry + %21 = phi i1 [ %19, %entry ], [ %20, %condFalse__1 ] + br i1 %21, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %condContinue__1 + %22 = icmp eq i64 %__qsVar3__p__, %__qsVar6__s__ + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condContinue__1 + %23 = phi i1 [ %21, %condContinue__1 ], [ %22, %condFalse__2 ] + br i1 %23, label %condContinue__3, label %condFalse__3 + +condFalse__3: ; preds = %condContinue__2 + %24 = icmp eq i64 %__qsVar4__q__, %__qsVar5__r__ + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condContinue__2 + %25 = phi i1 [ %23, %condContinue__2 ], [ %24, %condFalse__3 ] + br i1 %25, label %condContinue__4, label %condFalse__4 + +condFalse__4: ; preds = %condContinue__3 + %26 = icmp eq i64 %__qsVar4__q__, %__qsVar6__s__ + br label %condContinue__4 + +condContinue__4: ; preds = %condFalse__4, %condContinue__3 + %27 = phi i1 [ %25, %condContinue__3 ], [ %26, %condFalse__4 ] + br i1 %27, label %condContinue__5, label %condFalse__5 + +condFalse__5: ; preds = %condContinue__4 + %28 = icmp eq i64 %__qsVar5__r__, %__qsVar6__s__ + br label %condContinue__5 + +condContinue__5: ; preds = %condFalse__5, %condContinue__4 + %29 = phi i1 [ %27, %condContinue__4 ], [ %28, %condFalse__5 ] + br i1 %29, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__5 + %30 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @0, i32 0, i32 0)) + %31 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar3__p__) + %32 = call %String* @__quantum__rt__string_concatenate(%String* %30, %String* %31) + call void @__quantum__rt__string_update_reference_count(%String* %30, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %31, i32 -1) + %33 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i32 0, i32 0)) + %34 = call %String* @__quantum__rt__string_concatenate(%String* %32, %String* %33) + call void @__quantum__rt__string_update_reference_count(%String* %32, i32 -1) + call void 
@__quantum__rt__string_update_reference_count(%String* %33, i32 -1) + %35 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar4__q__) + %36 = call %String* @__quantum__rt__string_concatenate(%String* %34, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i32 0, i32 0)) + %38 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %37) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %37, i32 -1) + %39 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar5__r__) + %40 = call %String* @__quantum__rt__string_concatenate(%String* %38, %String* %39) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i32 0, i32 0)) + %42 = call %String* @__quantum__rt__string_concatenate(%String* %40, %String* %41) + call void @__quantum__rt__string_update_reference_count(%String* %40, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %41, i32 -1) + %43 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar6__s__) + %44 = call %String* @__quantum__rt__string_concatenate(%String* %42, %String* %43) + call void @__quantum__rt__string_update_reference_count(%String* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %43, i32 -1) + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @2, i32 0, i32 0)) + %46 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__fail(%String* %46) + unreachable + +continue__1: ; preds = %condContinue__5 + %47 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 0) + %49 = bitcast i8* %48 to i2* + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 1) + %51 = bitcast i8* %50 to i2* + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 2) + %53 = bitcast i8* %52 to i2* + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 3) + %55 = bitcast i8* %54 to i2* + store i2 -1, i2* %49, align 1 + store i2 -1, i2* 
%51, align 1 + store i2 1, i2* %53, align 1 + store i2 -1, i2* %55, align 1 + %56 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 0) + %58 = bitcast i8* %57 to i2* + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 1) + %60 = bitcast i8* %59 to i2* + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 2) + %62 = bitcast i8* %61 to i2* + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 3) + %64 = bitcast i8* %63 to i2* + store i2 1, i2* %58, align 1 + store i2 1, i2* %60, align 1 + store i2 1, i2* %62, align 1 + store i2 -1, i2* %64, align 1 + %65 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 0) + %67 = bitcast i8* %66 to i2* + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 1) + %69 = bitcast i8* %68 to i2* + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 2) + %71 = bitcast i8* %70 to i2* + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 3) + %73 = bitcast i8* %72 to i2* + store i2 1, i2* %67, align 1 + store i2 -1, i2* %69, align 1 + store i2 -1, i2* %71, align 1 + store i2 -1, i2* %73, align 1 + %74 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 0) + %76 = bitcast i8* %75 to i2* + %77 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 1) + %78 = bitcast i8* %77 to i2* + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 2) + %80 = bitcast i8* %79 to i2* + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 3) + %82 = bitcast i8* %81 to i2* + store i2 -1, i2* %76, align 1 + store i2 1, i2* %78, align 1 + store i2 -1, i2* %80, align 1 + store i2 -1, i2* %82, align 1 + %83 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 0) + %85 = bitcast i8* %84 to i2* + %86 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 1) + %87 = bitcast i8* %86 to i2* + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 2) + %89 = bitcast i8* %88 to i2* + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 3) + %91 = bitcast i8* %90 to i2* + store i2 1, i2* %85, align 1 + store i2 -1, i2* %87, align 1 + store i2 1, i2* %89, align 1 + store i2 1, i2* %91, align 1 + %92 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 0) + %94 = bitcast i8* %93 to i2* + %95 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 1) + %96 = bitcast i8* %95 to i2* + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 2) + %98 = bitcast i8* %97 to i2* + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 3) + %100 = bitcast i8* %99 to i2* + store i2 -1, i2* %94, align 1 + store i2 1, i2* %96, align 1 + store i2 1, i2* %98, align 1 + store i2 1, i2* %100, align 1 + %101 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 0) + %103 = bitcast i8* %102 to i2* + %104 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 1) + %105 = bitcast i8* %104 to i2* + %106 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 2) + %107 = bitcast i8* %106 to i2* + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 3) + %109 = bitcast i8* %108 to i2* + store i2 -1, i2* %103, align 1 + store i2 -1, i2* %105, align 1 + store i2 -1, i2* %107, align 1 + store i2 1, i2* %109, align 1 + %110 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %111 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 0) + %112 = bitcast i8* %111 to i2* + %113 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 1) + %114 = bitcast i8* %113 to i2* + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 2) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 3) + %118 = bitcast i8* %117 to i2* + store i2 1, i2* %112, align 1 + store i2 1, i2* %114, align 1 + store i2 -1, i2* %116, align 1 + store i2 1, i2* %118, align 1 + %__qsVar10__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 0) + %120 = bitcast i8* %119 to %Array** + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 1) + %122 = bitcast i8* %121 to %Array** + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 2) + %124 = bitcast i8* %123 to %Array** + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 3) + %126 = bitcast i8* %125 to %Array** + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 4) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 5) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 6) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 7) + %134 = bitcast i8* %133 to %Array** + store %Array* %47, %Array** %120, align 8 + store %Array* %56, %Array** %122, align 8 + store %Array* %65, %Array** %124, align 8 + store %Array* %74, %Array** %126, align 8 + store %Array* %83, %Array** %128, align 8 + store %Array* %92, %Array** %130, align 8 + store %Array* %101, %Array** %132, align 8 + store %Array* %110, %Array** %134, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %135 = phi i64 [ 0, %continue__1 ], [ %140, %exiting__1 ] + %136 = icmp sle i64 %135, 7 + br i1 %136, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %135) + %138 = bitcast i8* %137 to %Array** + %139 = load %Array*, %Array** %138, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %139, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %140 = add i64 %135, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__ops__, i32 1) + %141 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 0) + %143 = bitcast i8* %142 to i64* + %144 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 1) + %145 = bitcast i8* %144 to i64* + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%141, i64 2) + %147 = bitcast i8* %146 to i64* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 3) + %149 = bitcast i8* %148 to i64* + store i64 %__qsVar3__p__, i64* %143, align 4 + store i64 %__qsVar4__q__, i64* %145, align 4 + store i64 %__qsVar5__r__, i64* %147, align 4 + store i64 %__qsVar6__s__, i64* %149, align 4 + %150 = call { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %141) + %151 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 0 + %__qsVar11__sortedIndices__ = load %Array*, %Array** %151, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar11__sortedIndices__, i32 1) + %152 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 1 + %__qsVar12__signs__ = load %Array*, %Array** %152, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar12__signs__, i32 1) + %153 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 2 + %__qsVar13__globalSign__ = load double, double* %153, align 8 + %154 = call %Array* @Microsoft__Quantum__Arrays___3d2e1155dc4b4f3ab6fe59d3ebd7442c_Zipped__body(%Array* %__qsVar10__ops__, %Array* %__qsVar12__signs__) + %155 = call %Array* @Microsoft__Quantum__Arrays___3d2e1155dc4b4f3ab6fe59d3ebd7442c_Zipped__body(%Array* %__qsVar10__ops__, %Array* %__qsVar12__signs__) + %156 = call i64 @__quantum__rt__array_get_size_1d(%Array* %155) + %157 = sub i64 %156, 1 + %158 = insertvalue %Range zeroinitializer, i64 %157, 0 + %159 = insertvalue %Range %158, i64 -1, 1 + %160 = insertvalue %Range %159, i64 0, 2 + %161 = call %Array* @__quantum__rt__array_slice_1d(%Array* %154, %Range %160, i1 true) + %162 = call i64 @__quantum__rt__array_get_size_1d(%Array* %161) + %163 = sub i64 %162, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %164 = phi i64 [ 0, %exit__1 ], [ %173, %exiting__2 ] + %165 = icmp sle i64 %164, %163 + br i1 %165, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %166 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %161, i64 %164) + %167 = bitcast i8* %166 to { %Array*, double }** + %168 = load { %Array*, double }*, { %Array*, double }** %167, align 8 + %169 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %168, i32 0, i32 0 + %__qsVar14__op__ = load %Array*, %Array** %169, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar14__op__, i32 1) + %170 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %168, i32 0, i32 1 + %__qsVar15__sign__ = load double, double* %170, align 8 + %171 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %__qsVar16__pauliString__ = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %171, %Array* %__qsVar11__sortedIndices__, %Array* %__qsVar14__op__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 1) + %172 = fmul double %__qsVar13__globalSign__, %__qsVar15__sign__ + %theta = fmul double %172, %__qsVar7__angle__ + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %__qsVar16__pauliString__, double %theta, %Array* %qubits) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar14__op__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar16__pauliString__, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %173 = add i64 %164, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %174 = phi i64 [ 0, %exit__2 ], [ %179, %exiting__3 ] + %175 = icmp sle i64 %174, 7 + br i1 %175, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %176 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %174) + %177 = bitcast i8* %176 to %Array** + %178 = load %Array*, %Array** %177, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %178, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %179 = add i64 %174, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__ops__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar11__sortedIndices__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar12__signs__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %180 = phi i64 [ 0, %exit__3 ], [ %185, %exiting__4 ] + %181 = icmp sle i64 %180, 7 + br i1 %181, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %182 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %180) + %183 = bitcast i8* %182 to %Array** + %184 = load %Array*, %Array** %183, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %184, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %185 = add i64 %180, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %141, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar11__sortedIndices__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar12__signs__, i32 -1) + %186 = bitcast { %Array*, %Array*, double }* %150 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %186, i32 -1) + %187 = call i64 @__quantum__rt__array_get_size_1d(%Array* %154) + %188 = sub i64 %187, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, 
%exit__4 + %189 = phi i64 [ 0, %exit__4 ], [ %197, %exiting__5 ] + %190 = icmp sle i64 %189, %188 + br i1 %190, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %191 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %154, i64 %189) + %192 = bitcast i8* %191 to { %Array*, double }** + %193 = load { %Array*, double }*, { %Array*, double }** %192, align 8 + %194 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %193, i32 0, i32 0 + %195 = load %Array*, %Array** %194, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %195, i32 -1) + %196 = bitcast { %Array*, double }* %193 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %196, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %197 = add i64 %189, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %154, i32 -1) + %198 = sub i64 %156, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %199 = phi i64 [ 0, %exit__5 ], [ %207, %exiting__6 ] + %200 = icmp sle i64 %199, %198 + br i1 %200, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %201 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %155, i64 %199) + %202 = bitcast i8* %201 to { %Array*, double }** + %203 = load { %Array*, double }*, { %Array*, double }** %202, align 8 + %204 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %203, i32 0, i32 0 + %205 = load %Array*, %Array** %204, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %205, i32 -1) + %206 = bitcast { %Array*, double }* %203 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %206, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %207 = add i64 %199, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %155, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %161, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWignerClusterOperatorPQRSTerm____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %coeff = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load 
%Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %12 = bitcast i8* %11 to i64* + %p = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %14 = bitcast i8* %13 to i64* + %q = load i64, i64* %14, align 4 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 2) + %16 = bitcast i8* %15 to i64* + %r = load i64, i64* %16, align 4 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %18 = bitcast i8* %17 to i64* + %s = load i64, i64* %18, align 4 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %20 = bitcast i8* %19 to double* + %21 = load double, double* %20, align 8 + %22 = fmul double 1.250000e-01, %21 + %angle = fmul double %22, %stepSize + %23 = icmp eq i64 %p, %q + br i1 %23, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %entry + %24 = icmp eq i64 %p, %r + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %entry + %25 = phi i1 [ %23, %entry ], [ %24, %condFalse__1 ] + br i1 %25, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %condContinue__1 + %26 = icmp eq i64 %p, %s + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condContinue__1 + %27 = phi i1 [ %25, %condContinue__1 ], [ %26, %condFalse__2 ] + br i1 %27, label %condContinue__3, label %condFalse__3 + +condFalse__3: ; preds = %condContinue__2 + %28 = icmp eq i64 %q, %r + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condContinue__2 + %29 = phi i1 [ %27, %condContinue__2 ], [ %28, %condFalse__3 ] + br i1 %29, label %condContinue__4, label %condFalse__4 + +condFalse__4: ; preds = %condContinue__3 + %30 = icmp eq i64 %q, %s + br label %condContinue__4 + +condContinue__4: ; preds = %condFalse__4, %condContinue__3 + %31 = phi i1 [ %29, %condContinue__3 ], [ %30, %condFalse__4 ] + br i1 %31, label %condContinue__5, label %condFalse__5 + +condFalse__5: ; preds = %condContinue__4 + %32 = icmp eq i64 %r, %s + br label %condContinue__5 + +condContinue__5: ; preds = %condFalse__5, %condContinue__4 + %33 = phi i1 [ %31, %condContinue__4 ], [ %32, %condFalse__5 ] + br i1 %33, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__5 + %34 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @0, i32 0, i32 0)) + %35 = call %String* @__quantum__rt__int_to_string(i64 %p) + %36 = call %String* 
@__quantum__rt__string_concatenate(%String* %34, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i32 0, i32 0)) + %38 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %37) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %37, i32 -1) + %39 = call %String* @__quantum__rt__int_to_string(i64 %q) + %40 = call %String* @__quantum__rt__string_concatenate(%String* %38, %String* %39) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i32 0, i32 0)) + %42 = call %String* @__quantum__rt__string_concatenate(%String* %40, %String* %41) + call void @__quantum__rt__string_update_reference_count(%String* %40, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %41, i32 -1) + %43 = call %String* @__quantum__rt__int_to_string(i64 %r) + %44 = call %String* @__quantum__rt__string_concatenate(%String* %42, %String* %43) + call void @__quantum__rt__string_update_reference_count(%String* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %43, i32 -1) + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i32 0, i32 0)) + %46 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + %47 = call %String* @__quantum__rt__int_to_string(i64 %s) + %48 = call %String* @__quantum__rt__string_concatenate(%String* %46, %String* %47) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + %49 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @2, i32 0, i32 0)) + %50 = call %String* @__quantum__rt__string_concatenate(%String* %48, %String* %49) + call void @__quantum__rt__string_update_reference_count(%String* %48, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %49, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %50) + unreachable + +continue__1: ; preds = %condContinue__5 + %51 = call %Array* 
@__quantum__rt__array_create_1d(i32 1, i64 4) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 0) + %53 = bitcast i8* %52 to i2* + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 1) + %55 = bitcast i8* %54 to i2* + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 2) + %57 = bitcast i8* %56 to i2* + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 3) + %59 = bitcast i8* %58 to i2* + store i2 -1, i2* %53, align 1 + store i2 -1, i2* %55, align 1 + store i2 1, i2* %57, align 1 + store i2 -1, i2* %59, align 1 + %60 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 0) + %62 = bitcast i8* %61 to i2* + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 1) + %64 = bitcast i8* %63 to i2* + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 2) + %66 = bitcast i8* %65 to i2* + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 3) + %68 = bitcast i8* %67 to i2* + store i2 1, i2* %62, align 1 + store i2 1, i2* %64, align 1 + store i2 1, i2* %66, align 1 + store i2 -1, i2* %68, align 1 + %69 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 0) + %71 = bitcast i8* %70 to i2* + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 1) + %73 = bitcast i8* %72 to i2* + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 2) + %75 = bitcast i8* %74 to i2* + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 3) + %77 = bitcast i8* %76 to i2* + store i2 1, i2* %71, align 1 + store i2 -1, i2* %73, align 1 + store i2 -1, i2* %75, align 1 + store i2 -1, i2* %77, align 1 + %78 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 0) + %80 = bitcast i8* %79 to i2* + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 1) + %82 = bitcast i8* %81 to i2* + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 2) + %84 = bitcast i8* %83 to i2* + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 3) + %86 = bitcast i8* %85 to i2* + store i2 -1, i2* %80, align 1 + store i2 1, i2* %82, align 1 + store i2 -1, i2* %84, align 1 + store i2 -1, i2* %86, align 1 + %87 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 0) + %89 = bitcast i8* %88 to i2* + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 1) + %91 = bitcast i8* %90 to i2* + %92 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 2) + %93 = bitcast i8* %92 to i2* + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 3) + %95 = bitcast i8* %94 to i2* + store i2 1, i2* %89, align 1 + store i2 -1, i2* %91, align 1 + store i2 1, i2* %93, align 1 + store i2 1, i2* %95, align 1 + %96 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 0) + %98 = bitcast i8* %97 to i2* + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 1) + %100 = bitcast i8* %99 to i2* + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 2) + %102 = bitcast i8* %101 to i2* + %103 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 3) + %104 = bitcast i8* %103 to i2* + store i2 -1, i2* %98, align 1 + store i2 1, i2* %100, align 1 + store i2 1, i2* %102, align 1 + store i2 1, i2* %104, align 1 + %105 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 0) + %107 = bitcast i8* %106 to i2* + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 1) + %109 = bitcast i8* %108 to i2* + %110 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 2) + %111 = bitcast i8* %110 to i2* + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 3) + %113 = bitcast i8* %112 to i2* + store i2 -1, i2* %107, align 1 + store i2 -1, i2* %109, align 1 + store i2 -1, i2* %111, align 1 + store i2 1, i2* %113, align 1 + %114 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 0) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 1) + %118 = bitcast i8* %117 to i2* + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 2) + %120 = bitcast i8* %119 to i2* + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 3) + %122 = bitcast i8* %121 to i2* + store i2 1, i2* %116, align 1 + store i2 1, i2* %118, align 1 + store i2 -1, i2* %120, align 1 + store i2 1, i2* %122, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %124 = bitcast i8* %123 to %Array** + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %126 = bitcast i8* %125 to %Array** + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 2) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 3) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 4) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 5) + %134 = bitcast i8* %133 to %Array** + %135 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 6) + %136 = bitcast i8* %135 to %Array** + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 7) + %138 = bitcast i8* %137 to %Array** + store %Array* %51, %Array** %124, align 8 + store %Array* %60, %Array** %126, align 8 + store %Array* %69, %Array** %128, align 8 + store %Array* %78, %Array** %130, align 8 + store %Array* %87, %Array** %132, align 8 + store %Array* %96, %Array** %134, align 8 + store %Array* %105, %Array** %136, align 8 + store %Array* %114, %Array** %138, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %139 = phi i64 [ 0, %continue__1 ], [ %144, %exiting__1 ] + %140 = icmp sle i64 %139, 7 + br i1 %140, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %141 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %139) + %142 = bitcast i8* %141 to %Array** + %143 = load %Array*, %Array** %142, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %143, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %144 = add i64 %139, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void 
@__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %145 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 0) + %147 = bitcast i8* %146 to i64* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 1) + %149 = bitcast i8* %148 to i64* + %150 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 2) + %151 = bitcast i8* %150 to i64* + %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 3) + %153 = bitcast i8* %152 to i64* + store i64 %p, i64* %147, align 4 + store i64 %q, i64* %149, align 4 + store i64 %r, i64* %151, align 4 + store i64 %s, i64* %153, align 4 + %154 = call { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %145) + %155 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 0 + %sortedIndices = load %Array*, %Array** %155, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %sortedIndices, i32 1) + %156 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 1 + %signs = load %Array*, %Array** %156, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 1) + %157 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 2 + %globalSign = load double, double* %157, align 8 + %158 = call %Array* @Microsoft__Quantum__Arrays___3d2e1155dc4b4f3ab6fe59d3ebd7442c_Zipped__body(%Array* %ops, %Array* %signs) + %159 = call i64 @__quantum__rt__array_get_size_1d(%Array* %158) + %160 = sub i64 %159, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %161 = phi i64 [ 0, %exit__1 ], [ %175, %exiting__2 ] + %162 = icmp sle i64 %161, %160 + br i1 %162, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %163 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %158, i64 %161) + %164 = bitcast i8* %163 to { %Array*, double }** + %165 = load { %Array*, double }*, { %Array*, double }** %164, align 8 + %166 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %165, i32 0, i32 0 + %op = load %Array*, %Array** %166, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %167 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %165, i32 0, i32 1 + %sign = load double, double* %167, align 8 + %168 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %pauliString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %168, %Array* %sortedIndices, %Array* %op) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + %169 = fmul double %globalSign, %sign + %theta = fmul double %169, %angle + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %170 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %171 = bitcast %Tuple* 
%170 to { %Array*, double, %Array* }* + %172 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %171, i32 0, i32 0 + %173 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %171, i32 0, i32 1 + %174 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %171, i32 0, i32 2 + store %Array* %pauliString, %Array** %172, align 8 + store double %theta, double* %173, align 8 + store %Array* %qubits, %Array** %174, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %171) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %170, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %175 = add i64 %161, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %176 = phi i64 [ 0, %exit__2 ], [ %181, %exiting__3 ] + %177 = icmp sle i64 %176, 7 + br i1 %177, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %178 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %176) + %179 = bitcast i8* %178 to %Array** + %180 = load %Array*, %Array** %179, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %180, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %181 = add i64 %176, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %sortedIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %182 = phi i64 [ 0, %exit__3 ], [ %187, %exiting__4 ] + %183 = icmp sle i64 %182, 7 + br i1 %183, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %184 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %182) + %185 = bitcast i8* %184 to %Array** + %186 = load %Array*, %Array** %185, align 8 + call 
void @__quantum__rt__array_update_reference_count(%Array* %186, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %187 = add i64 %182, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %145, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %sortedIndices, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %signs, i32 -1) + %188 = bitcast { %Array*, %Array*, double }* %154 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %188, i32 -1) + %189 = sub i64 %159, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %190 = phi i64 [ 0, %exit__4 ], [ %198, %exiting__5 ] + %191 = icmp sle i64 %190, %189 + br i1 %191, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %192 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %158, i64 %190) + %193 = bitcast i8* %192 to { %Array*, double }** + %194 = load { %Array*, double }*, { %Array*, double }** %193, align 8 + %195 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %194, i32 0, i32 0 + %196 = load %Array*, %Array** %195, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %196, i32 -1) + %197 = bitcast { %Array*, double }* %194 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %197, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %198 = add i64 %190, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %158, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWignerClusterOperatorPQRSTerm____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr 
inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %12 = bitcast i8* %11 to i64* + %__qsVar3__p__ = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %14 = bitcast i8* %13 to i64* + %__qsVar4__q__ = load i64, i64* %14, align 4 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 2) + %16 = bitcast i8* %15 to i64* + %__qsVar5__r__ = load i64, i64* %16, align 4 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %18 = bitcast i8* %17 to i64* + %__qsVar6__s__ = load i64, i64* %18, align 4 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %20 = bitcast i8* %19 to double* + %21 = load double, double* %20, align 8 + %22 = fmul double 1.250000e-01, %21 + %__qsVar7__angle__ = fmul double %22, %stepSize + %23 = icmp eq i64 %__qsVar3__p__, %__qsVar4__q__ + br i1 %23, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %entry + %24 = icmp eq i64 %__qsVar3__p__, %__qsVar5__r__ + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %entry + %25 = phi i1 [ %23, %entry ], [ %24, %condFalse__1 ] + br i1 %25, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %condContinue__1 + %26 = icmp eq i64 %__qsVar3__p__, %__qsVar6__s__ + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condContinue__1 + %27 = phi i1 [ %25, %condContinue__1 ], [ %26, %condFalse__2 ] + br i1 %27, label %condContinue__3, label %condFalse__3 + +condFalse__3: ; preds = %condContinue__2 + %28 = icmp eq i64 %__qsVar4__q__, %__qsVar5__r__ + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condContinue__2 + %29 = phi i1 [ %27, %condContinue__2 ], [ %28, %condFalse__3 ] + br i1 %29, label %condContinue__4, label %condFalse__4 + +condFalse__4: ; preds = %condContinue__3 + %30 = icmp eq i64 %__qsVar4__q__, %__qsVar6__s__ + br label %condContinue__4 + +condContinue__4: ; preds = %condFalse__4, %condContinue__3 + %31 = phi i1 [ %29, %condContinue__3 ], [ %30, %condFalse__4 ] + br i1 %31, label %condContinue__5, label %condFalse__5 + +condFalse__5: ; preds = %condContinue__4 + %32 = icmp eq i64 %__qsVar5__r__, %__qsVar6__s__ + br label %condContinue__5 + +condContinue__5: ; preds = %condFalse__5, %condContinue__4 + %33 = phi i1 [ %31, %condContinue__4 ], [ %32, %condFalse__5 ] + br i1 %33, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__5 + %34 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @0, i32 0, i32 0)) + %35 = call %String* @__quantum__rt__int_to_string(i64 
%__qsVar3__p__) + %36 = call %String* @__quantum__rt__string_concatenate(%String* %34, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i32 0, i32 0)) + %38 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %37) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %37, i32 -1) + %39 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar4__q__) + %40 = call %String* @__quantum__rt__string_concatenate(%String* %38, %String* %39) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i32 0, i32 0)) + %42 = call %String* @__quantum__rt__string_concatenate(%String* %40, %String* %41) + call void @__quantum__rt__string_update_reference_count(%String* %40, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %41, i32 -1) + %43 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar5__r__) + %44 = call %String* @__quantum__rt__string_concatenate(%String* %42, %String* %43) + call void @__quantum__rt__string_update_reference_count(%String* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %43, i32 -1) + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i32 0, i32 0)) + %46 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + %47 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar6__s__) + %48 = call %String* @__quantum__rt__string_concatenate(%String* %46, %String* %47) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + %49 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @2, i32 0, i32 0)) + %50 = call %String* @__quantum__rt__string_concatenate(%String* %48, %String* %49) + call void @__quantum__rt__string_update_reference_count(%String* %48, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %49, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void 
@__quantum__rt__fail(%String* %50) + unreachable + +continue__1: ; preds = %condContinue__5 + %51 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 0) + %53 = bitcast i8* %52 to i2* + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 1) + %55 = bitcast i8* %54 to i2* + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 2) + %57 = bitcast i8* %56 to i2* + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 3) + %59 = bitcast i8* %58 to i2* + store i2 -1, i2* %53, align 1 + store i2 -1, i2* %55, align 1 + store i2 1, i2* %57, align 1 + store i2 -1, i2* %59, align 1 + %60 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 0) + %62 = bitcast i8* %61 to i2* + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 1) + %64 = bitcast i8* %63 to i2* + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 2) + %66 = bitcast i8* %65 to i2* + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 3) + %68 = bitcast i8* %67 to i2* + store i2 1, i2* %62, align 1 + store i2 1, i2* %64, align 1 + store i2 1, i2* %66, align 1 + store i2 -1, i2* %68, align 1 + %69 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 0) + %71 = bitcast i8* %70 to i2* + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 1) + %73 = bitcast i8* %72 to i2* + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 2) + %75 = bitcast i8* %74 to i2* + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 3) + %77 = bitcast i8* %76 to i2* + store i2 1, i2* %71, align 1 + store i2 -1, i2* %73, align 1 + store i2 -1, i2* %75, align 1 + store i2 -1, i2* %77, align 1 + %78 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 0) + %80 = bitcast i8* %79 to i2* + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 1) + %82 = bitcast i8* %81 to i2* + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 2) + %84 = bitcast i8* %83 to i2* + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 3) + %86 = bitcast i8* %85 to i2* + store i2 -1, i2* %80, align 1 + store i2 1, i2* %82, align 1 + store i2 -1, i2* %84, align 1 + store i2 -1, i2* %86, align 1 + %87 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 0) + %89 = bitcast i8* %88 to i2* + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 1) + %91 = bitcast i8* %90 to i2* + %92 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 2) + %93 = bitcast i8* %92 to i2* + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 3) + %95 = bitcast i8* %94 to i2* + store i2 1, i2* %89, align 1 + store i2 -1, i2* %91, align 1 + store i2 1, i2* %93, align 1 + store i2 1, i2* %95, align 1 + %96 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 0) + %98 = bitcast i8* %97 to i2* + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 1) + %100 = bitcast i8* %99 to i2* + %101 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 2) + %102 = bitcast i8* %101 to i2* + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 3) + %104 = bitcast i8* %103 to i2* + store i2 -1, i2* %98, align 1 + store i2 1, i2* %100, align 1 + store i2 1, i2* %102, align 1 + store i2 1, i2* %104, align 1 + %105 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 0) + %107 = bitcast i8* %106 to i2* + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 1) + %109 = bitcast i8* %108 to i2* + %110 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 2) + %111 = bitcast i8* %110 to i2* + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 3) + %113 = bitcast i8* %112 to i2* + store i2 -1, i2* %107, align 1 + store i2 -1, i2* %109, align 1 + store i2 -1, i2* %111, align 1 + store i2 1, i2* %113, align 1 + %114 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 0) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 1) + %118 = bitcast i8* %117 to i2* + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 2) + %120 = bitcast i8* %119 to i2* + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 3) + %122 = bitcast i8* %121 to i2* + store i2 1, i2* %116, align 1 + store i2 1, i2* %118, align 1 + store i2 -1, i2* %120, align 1 + store i2 1, i2* %122, align 1 + %__qsVar10__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 0) + %124 = bitcast i8* %123 to %Array** + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 1) + %126 = bitcast i8* %125 to %Array** + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 2) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 3) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 4) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 5) + %134 = bitcast i8* %133 to %Array** + %135 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 6) + %136 = bitcast i8* %135 to %Array** + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 7) + %138 = bitcast i8* %137 to %Array** + store %Array* %51, %Array** %124, align 8 + store %Array* %60, %Array** %126, align 8 + store %Array* %69, %Array** %128, align 8 + store %Array* %78, %Array** %130, align 8 + store %Array* %87, %Array** %132, align 8 + store %Array* %96, %Array** %134, align 8 + store %Array* %105, %Array** %136, align 8 + store %Array* %114, %Array** %138, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %139 = phi i64 [ 0, %continue__1 ], [ %144, %exiting__1 ] + %140 = icmp sle i64 %139, 7 + br i1 %140, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %141 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %139) + %142 = bitcast i8* %141 to %Array** + %143 = load %Array*, %Array** %142, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %143, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %144 = add i64 %139, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__ops__, i32 1) + %145 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 0) + %147 = bitcast i8* %146 to i64* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 1) + %149 = bitcast i8* %148 to i64* + %150 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 2) + %151 = bitcast i8* %150 to i64* + %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 3) + %153 = bitcast i8* %152 to i64* + store i64 %__qsVar3__p__, i64* %147, align 4 + store i64 %__qsVar4__q__, i64* %149, align 4 + store i64 %__qsVar5__r__, i64* %151, align 4 + store i64 %__qsVar6__s__, i64* %153, align 4 + %154 = call { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %145) + %155 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 0 + %__qsVar11__sortedIndices__ = load %Array*, %Array** %155, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar11__sortedIndices__, i32 1) + %156 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 1 + %__qsVar12__signs__ = load %Array*, %Array** %156, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar12__signs__, i32 1) + %157 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 2 + %__qsVar13__globalSign__ = load double, double* %157, align 8 + %158 = call %Array* @Microsoft__Quantum__Arrays___3d2e1155dc4b4f3ab6fe59d3ebd7442c_Zipped__body(%Array* %__qsVar10__ops__, %Array* %__qsVar12__signs__) + %159 = call %Array* @Microsoft__Quantum__Arrays___3d2e1155dc4b4f3ab6fe59d3ebd7442c_Zipped__body(%Array* %__qsVar10__ops__, %Array* %__qsVar12__signs__) + %160 = call i64 @__quantum__rt__array_get_size_1d(%Array* %159) + %161 = sub i64 %160, 1 + %162 = insertvalue %Range zeroinitializer, i64 %161, 0 + %163 = insertvalue %Range %162, i64 -1, 1 + %164 = insertvalue %Range %163, i64 0, 2 + %165 = call %Array* @__quantum__rt__array_slice_1d(%Array* %158, %Range %164, i1 true) + %166 = call i64 @__quantum__rt__array_get_size_1d(%Array* %165) + %167 = sub i64 %166, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %168 = phi i64 [ 0, %exit__1 ], [ %182, %exiting__2 ] + %169 = icmp sle i64 %168, %167 + br i1 %169, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %170 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 %168) + %171 = bitcast i8* %170 to { %Array*, double }** + %172 = load { %Array*, double }*, { %Array*, double }** %171, align 8 + %173 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %172, i32 0, i32 0 + %__qsVar14__op__ = load %Array*, %Array** %173, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar14__op__, i32 1) + %174 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %172, i32 0, i32 1 + %__qsVar15__sign__ = load double, double* %174, align 8 + %175 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %__qsVar16__pauliString__ = call %Array* 
@Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %175, %Array* %__qsVar11__sortedIndices__, %Array* %__qsVar14__op__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 1) + %176 = fmul double %__qsVar13__globalSign__, %__qsVar15__sign__ + %theta = fmul double %176, %__qsVar7__angle__ + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar16__pauliString__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %177 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %178 = bitcast %Tuple* %177 to { %Array*, double, %Array* }* + %179 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %178, i32 0, i32 0 + %180 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %178, i32 0, i32 1 + %181 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %178, i32 0, i32 2 + store %Array* %__qsVar16__pauliString__, %Array** %179, align 8 + store double %theta, double* %180, align 8 + store %Array* %qubits, %Array** %181, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %178) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %177, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar14__op__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar16__pauliString__, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %182 = add i64 %168, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %183 = phi 
i64 [ 0, %exit__2 ], [ %188, %exiting__3 ] + %184 = icmp sle i64 %183, 7 + br i1 %184, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %185 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %183) + %186 = bitcast i8* %185 to %Array** + %187 = load %Array*, %Array** %186, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %187, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %188 = add i64 %183, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__ops__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar11__sortedIndices__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar12__signs__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %189 = phi i64 [ 0, %exit__3 ], [ %194, %exiting__4 ] + %190 = icmp sle i64 %189, 7 + br i1 %190, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %191 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %189) + %192 = bitcast i8* %191 to %Array** + %193 = load %Array*, %Array** %192, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %193, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %194 = add i64 %189, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %145, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar11__sortedIndices__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar12__signs__, i32 -1) + %195 = bitcast { %Array*, %Array*, double }* %154 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %195, i32 -1) + %196 = call i64 @__quantum__rt__array_get_size_1d(%Array* %158) + %197 = sub i64 %196, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %198 = phi i64 [ 0, %exit__4 ], [ %206, %exiting__5 ] + %199 = icmp sle i64 %198, %197 + br i1 %199, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %200 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %158, i64 %198) + %201 = bitcast i8* %200 to { %Array*, double }** + %202 = load { %Array*, double }*, { %Array*, double }** %201, align 8 + %203 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %202, i32 0, i32 0 + %204 = load %Array*, %Array** %203, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %204, i32 -1) + %205 = bitcast { %Array*, double }* %202 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %205, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %206 = add i64 %198, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %158, i32 -1) + %207 = sub i64 %160, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %208 = phi i64 [ 0, %exit__5 ], [ %216, %exiting__6 ] + %209 = icmp sle i64 %208, %207 + br i1 %209, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %210 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %159, i64 %208) + %211 = bitcast i8* %210 to { %Array*, double }** + %212 = load { %Array*, double }*, { %Array*, double }** %211, align 8 + %213 = getelementptr inbounds { 
%Array*, double }, { %Array*, double }* %212, i32 0, i32 0 + %214 = load %Array*, %Array** %213, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %214, i32 -1) + %215 = bitcast { %Array*, double }* %212 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %215, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %216 = add i64 %208, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %159, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %165, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWignerClusterOperatorPQTerm____body({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %coeff = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %8 = bitcast i8* %7 to i64* + %p = load i64, i64* %8, align 4 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %10 = bitcast i8* %9 to i64* + %q = load i64, i64* %10, align 4 + %11 = icmp eq i64 %p, %q + br i1 %11, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([44 x i8], [44 x i8]* @3, i32 0, i32 0)) + %13 = call %String* @__quantum__rt__int_to_string(i64 %p) + %14 = call %String* @__quantum__rt__string_concatenate(%String* %12, %String* %13) + call void @__quantum__rt__string_update_reference_count(%String* %12, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + %15 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i32 0, i32 0)) + %16 = call %String* @__quantum__rt__string_concatenate(%String* %14, %String* %15) + call void @__quantum__rt__string_update_reference_count(%String* %14, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + %17 = call %String* 
@__quantum__rt__int_to_string(i64 %q) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @2, i32 0, i32 0)) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %20) + unreachable + +continue__1: ; preds = %entry + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %22 = bitcast i8* %21 to double* + %23 = load double, double* %22, align 8 + %24 = fmul double 5.000000e-01, %23 + %angle = fmul double %24, %stepSize + %25 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 0) + %27 = bitcast i8* %26 to i2* + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 1) + %29 = bitcast i8* %28 to i2* + store i2 1, i2* %27, align 1 + store i2 -1, i2* %29, align 1 + %30 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 0) + %32 = bitcast i8* %31 to i2* + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 1) + %34 = bitcast i8* %33 to i2* + store i2 -1, i2* %32, align 1 + store i2 1, i2* %34, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %36 = bitcast i8* %35 to %Array** + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %38 = bitcast i8* %37 to %Array** + store %Array* %25, %Array** %36, align 8 + store %Array* %30, %Array** %38, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %39 = phi i64 [ 0, %continue__1 ], [ %44, %exiting__1 ] + %40 = icmp sle i64 %39, 1 + br i1 %40, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %39) + %42 = bitcast i8* %41 to %Array** + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %44 = add i64 %39, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %signs = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %45 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %signs, i64 0) + %46 = bitcast i8* %45 to double* + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %signs, i64 1) + %48 = bitcast i8* %47 to double* + store double 1.000000e+00, double* %46, align 8 + store double -1.000000e+00, double* %48, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 1) + %49 = call %Array* @Microsoft__Quantum__Arrays___3d2e1155dc4b4f3ab6fe59d3ebd7442c_Zipped__body(%Array* %ops, %Array* %signs) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %52 = phi i64 [ 0, %exit__1 ], [ %60, %exiting__2 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { %Array*, double }** + %56 = load { %Array*, double }*, { %Array*, double }** %55, align 8 + %57 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %56, i32 0, i32 0 + %op = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %58 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %56, i32 0, i32 1 + %sign = load double, double* %58, align 8 + %59 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %pauliString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %59, %Array* %idxFermions, %Array* %op) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + %theta = fmul double %sign, %angle + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %pauliString, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %60 = add i64 %52, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %61 = phi i64 [ 0, %exit__2 ], [ %66, %exiting__3 ] + %62 = icmp sle i64 %61, 1 + br i1 %62, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %61) + %64 = bitcast i8* 
%63 to %Array** + %65 = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %66 = add i64 %61, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %67 = phi i64 [ 0, %exit__3 ], [ %72, %exiting__4 ] + %68 = icmp sle i64 %67, 1 + br i1 %68, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %67) + %70 = bitcast i8* %69 to %Array** + %71 = load %Array*, %Array** %70, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %71, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %72 = add i64 %67, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %signs, i32 -1) + %73 = sub i64 %50, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %74 = phi i64 [ 0, %exit__4 ], [ %82, %exiting__5 ] + %75 = icmp sle i64 %74, %73 + br i1 %75, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %74) + %77 = bitcast i8* %76 to { %Array*, double }** + %78 = load { %Array*, double }*, { %Array*, double }** %77, align 8 + %79 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %78, i32 0, i32 0 + %80 = load %Array*, %Array** %79, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %80, i32 -1) + %81 = bitcast { %Array*, double }* %78 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %81, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %82 = add i64 %74, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWignerClusterOperatorPQTerm____adj({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* 
%term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %8 = bitcast i8* %7 to i64* + %__qsVar3__p__ = load i64, i64* %8, align 4 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %10 = bitcast i8* %9 to i64* + %__qsVar4__q__ = load i64, i64* %10, align 4 + %11 = icmp eq i64 %__qsVar3__p__, %__qsVar4__q__ + br i1 %11, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([44 x i8], [44 x i8]* @3, i32 0, i32 0)) + %13 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar3__p__) + %14 = call %String* @__quantum__rt__string_concatenate(%String* %12, %String* %13) + call void @__quantum__rt__string_update_reference_count(%String* %12, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + %15 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i32 0, i32 0)) + %16 = call %String* @__quantum__rt__string_concatenate(%String* %14, %String* %15) + call void @__quantum__rt__string_update_reference_count(%String* %14, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + %17 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar4__q__) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @2, i32 0, i32 0)) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__fail(%String* %20) + unreachable + +continue__1: ; preds = %entry + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %22 = bitcast i8* %21 to double* + %23 = load double, double* %22, align 8 + %24 = fmul double 5.000000e-01, %23 + %__qsVar5__angle__ = fmul double %24, %stepSize + %25 = call 
%Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 0) + %27 = bitcast i8* %26 to i2* + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 1) + %29 = bitcast i8* %28 to i2* + store i2 1, i2* %27, align 1 + store i2 -1, i2* %29, align 1 + %30 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 0) + %32 = bitcast i8* %31 to i2* + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 1) + %34 = bitcast i8* %33 to i2* + store i2 -1, i2* %32, align 1 + store i2 1, i2* %34, align 1 + %__qsVar6__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 0) + %36 = bitcast i8* %35 to %Array** + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 1) + %38 = bitcast i8* %37 to %Array** + store %Array* %25, %Array** %36, align 8 + store %Array* %30, %Array** %38, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %39 = phi i64 [ 0, %continue__1 ], [ %44, %exiting__1 ] + %40 = icmp sle i64 %39, 1 + br i1 %40, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %39) + %42 = bitcast i8* %41 to %Array** + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %44 = add i64 %39, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 1) + %__qsVar7__signs__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar7__signs__, i64 0) + %46 = bitcast i8* %45 to double* + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar7__signs__, i64 1) + %48 = bitcast i8* %47 to double* + store double 1.000000e+00, double* %46, align 8 + store double -1.000000e+00, double* %48, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__signs__, i32 1) + %49 = call %Array* @Microsoft__Quantum__Arrays___3d2e1155dc4b4f3ab6fe59d3ebd7442c_Zipped__body(%Array* %__qsVar6__ops__, %Array* %__qsVar7__signs__) + %50 = call %Array* @Microsoft__Quantum__Arrays___3d2e1155dc4b4f3ab6fe59d3ebd7442c_Zipped__body(%Array* %__qsVar6__ops__, %Array* %__qsVar7__signs__) + %51 = call i64 @__quantum__rt__array_get_size_1d(%Array* %50) + %52 = sub i64 %51, 1 + %53 = insertvalue %Range zeroinitializer, i64 %52, 0 + %54 = insertvalue %Range %53, i64 -1, 1 + %55 = insertvalue %Range %54, i64 0, 2 + %56 = call %Array* @__quantum__rt__array_slice_1d(%Array* %49, %Range %55, i1 true) + %57 = call i64 @__quantum__rt__array_get_size_1d(%Array* %56) + %58 = sub i64 %57, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %59 = phi i64 [ 0, %exit__1 ], [ %67, %exiting__2 ] + %60 = icmp sle i64 %59, %58 + br i1 %60, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 %59) + %62 = bitcast i8* %61 to { %Array*, double }** + %63 = load { %Array*, double }*, { %Array*, double }** %62, align 8 + %64 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %63, i32 0, i32 0 + 
%__qsVar8__op__ = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 1) + %65 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %63, i32 0, i32 1 + %__qsVar9__sign__ = load double, double* %65, align 8 + %66 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %__qsVar10__pauliString__ = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %66, %Array* %__qsVar2__idxFermions__, %Array* %__qsVar8__op__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 1) + %theta = fmul double %__qsVar9__sign__, %__qsVar5__angle__ + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %__qsVar10__pauliString__, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__pauliString__, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %67 = add i64 %59, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %68 = phi i64 [ 0, %exit__2 ], [ %73, %exiting__3 ] + %69 = icmp sle i64 %68, 1 + br i1 %69, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %68) + %71 = bitcast i8* %70 to %Array** + %72 = load %Array*, %Array** %71, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %73 = add i64 %68, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__signs__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %74 = phi i64 [ 0, %exit__3 ], [ %79, %exiting__4 ] + %75 = icmp sle i64 %74, 1 + br i1 %75, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %74) + %77 = bitcast i8* %76 to %Array** + %78 = load %Array*, %Array** %77, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %78, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %79 = add i64 %74, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar7__signs__, i32 -1) + %80 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %81 = sub i64 %80, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %82 = phi i64 [ 0, %exit__4 ], [ %90, %exiting__5 ] + %83 = icmp sle i64 %82, %81 + br i1 %83, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %82) + %85 = bitcast i8* %84 to { %Array*, double }** + %86 = load { %Array*, double }*, { %Array*, double }** %85, align 8 + %87 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %86, i32 0, i32 0 + %88 = load %Array*, %Array** %87, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %88, i32 -1) + %89 = bitcast { %Array*, double }* %86 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %89, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %90 = add i64 %82, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + %91 = sub i64 %51, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %92 = phi i64 [ 0, %exit__5 ], [ %100, %exiting__6 ] + %93 = icmp sle i64 %92, %91 + br i1 %93, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 %92) + %95 = bitcast i8* %94 to { %Array*, double }** + %96 = load { %Array*, double }*, { %Array*, double }** %95, align 8 + %97 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %96, i32 0, i32 0 + %98 = load %Array*, %Array** %97, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %98, i32 -1) + %99 = bitcast { %Array*, double }* %96 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %99, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %100 = add i64 %92, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %50, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %56, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWignerClusterOperatorPQTerm____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + 
%5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %coeff = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %12 = bitcast i8* %11 to i64* + %p = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %14 = bitcast i8* %13 to i64* + %q = load i64, i64* %14, align 4 + %15 = icmp eq i64 %p, %q + br i1 %15, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %16 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([44 x i8], [44 x i8]* @3, i32 0, i32 0)) + %17 = call %String* @__quantum__rt__int_to_string(i64 %p) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i32 0, i32 0)) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + %21 = call %String* @__quantum__rt__int_to_string(i64 %q) + %22 = call %String* @__quantum__rt__string_concatenate(%String* %20, %String* %21) + call void @__quantum__rt__string_update_reference_count(%String* %20, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %21, i32 -1) + %23 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @2, i32 0, i32 0)) + %24 = call %String* @__quantum__rt__string_concatenate(%String* %22, %String* %23) + call void @__quantum__rt__string_update_reference_count(%String* %22, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %23, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %24) + unreachable + +continue__1: ; preds = %entry + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %26 = bitcast i8* %25 to double* + %27 = load double, double* %26, align 8 + %28 = fmul double 5.000000e-01, %27 + %angle = fmul double %28, %stepSize + %29 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %29, i64 0) + %31 = bitcast i8* %30 to i2* + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %29, i64 1) + %33 = bitcast i8* %32 to i2* + store i2 1, i2* %31, align 1 + store i2 -1, i2* %33, align 1 + %34 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 0) + %36 = bitcast i8* %35 to i2* + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 1) + %38 = bitcast i8* %37 to i2* + store i2 -1, i2* %36, align 1 + store i2 1, i2* %38, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %40 = bitcast i8* %39 to %Array** + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %42 = bitcast i8* %41 to %Array** + store %Array* %29, %Array** %40, align 8 + store %Array* %34, %Array** %42, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %43 = phi i64 [ 0, %continue__1 ], [ %48, %exiting__1 ] + %44 = icmp sle i64 %43, 1 + br i1 %44, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %43) + %46 = bitcast i8* %45 to %Array** + %47 = load %Array*, %Array** %46, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %48 = add i64 %43, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %signs = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %signs, i64 0) + %50 = bitcast i8* %49 to double* + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %signs, i64 1) + %52 = bitcast i8* %51 to double* + store double 1.000000e+00, double* %50, align 8 + store double -1.000000e+00, double* %52, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 1) + %53 = call %Array* @Microsoft__Quantum__Arrays___3d2e1155dc4b4f3ab6fe59d3ebd7442c_Zipped__body(%Array* %ops, %Array* %signs) + %54 = call i64 @__quantum__rt__array_get_size_1d(%Array* %53) + %55 = sub i64 %54, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %56 = phi i64 [ 0, %exit__1 ], [ %69, %exiting__2 ] + %57 = icmp sle i64 %56, %55 + br i1 
%57, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 %56) + %59 = bitcast i8* %58 to { %Array*, double }** + %60 = load { %Array*, double }*, { %Array*, double }** %59, align 8 + %61 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %60, i32 0, i32 0 + %op = load %Array*, %Array** %61, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %62 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %60, i32 0, i32 1 + %sign = load double, double* %62, align 8 + %63 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %pauliString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %63, %Array* %idxFermions, %Array* %op) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + %theta = fmul double %sign, %angle + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %64 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %65 = bitcast %Tuple* %64 to { %Array*, double, %Array* }* + %66 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %65, i32 0, i32 0 + %67 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %65, i32 0, i32 1 + %68 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %65, i32 0, i32 2 + store %Array* %pauliString, %Array** %66, align 8 + store double %theta, double* %67, align 8 + store %Array* %qubits, %Array** %68, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %65) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %64, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %69 = add i64 %56, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call 
void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %70 = phi i64 [ 0, %exit__2 ], [ %75, %exiting__3 ] + %71 = icmp sle i64 %70, 1 + br i1 %71, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %70) + %73 = bitcast i8* %72 to %Array** + %74 = load %Array*, %Array** %73, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %74, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %75 = add i64 %70, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %76 = phi i64 [ 0, %exit__3 ], [ %81, %exiting__4 ] + %77 = icmp sle i64 %76, 1 + br i1 %77, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %76) + %79 = bitcast i8* %78 to %Array** + %80 = load %Array*, %Array** %79, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %80, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %81 = add i64 %76, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %signs, i32 -1) + %82 = sub i64 %54, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %83 = phi i64 [ 0, %exit__4 ], [ %91, %exiting__5 ] + %84 = icmp sle i64 %83, %82 + br i1 %84, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 %83) + %86 = bitcast i8* %85 to { %Array*, double }** + %87 = load { %Array*, double }*, { %Array*, double }** %86, align 8 + %88 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %87, i32 0, i32 0 + %89 = load %Array*, %Array** %88, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %89, i32 -1) + %90 = bitcast { %Array*, double }* %87 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %90, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %91 = add i64 %83, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWignerClusterOperatorPQTerm____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 
= load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %12 = bitcast i8* %11 to i64* + %__qsVar3__p__ = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %14 = bitcast i8* %13 to i64* + %__qsVar4__q__ = load i64, i64* %14, align 4 + %15 = icmp eq i64 %__qsVar3__p__, %__qsVar4__q__ + br i1 %15, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %16 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([44 x i8], [44 x i8]* @3, i32 0, i32 0)) + %17 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar3__p__) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @1, i32 0, i32 0)) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + %21 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar4__q__) + %22 = call %String* @__quantum__rt__string_concatenate(%String* %20, %String* %21) + call void @__quantum__rt__string_update_reference_count(%String* %20, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %21, i32 -1) + %23 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds 
([18 x i8], [18 x i8]* @2, i32 0, i32 0)) + %24 = call %String* @__quantum__rt__string_concatenate(%String* %22, %String* %23) + call void @__quantum__rt__string_update_reference_count(%String* %22, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %23, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__fail(%String* %24) + unreachable + +continue__1: ; preds = %entry + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %26 = bitcast i8* %25 to double* + %27 = load double, double* %26, align 8 + %28 = fmul double 5.000000e-01, %27 + %__qsVar5__angle__ = fmul double %28, %stepSize + %29 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %29, i64 0) + %31 = bitcast i8* %30 to i2* + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %29, i64 1) + %33 = bitcast i8* %32 to i2* + store i2 1, i2* %31, align 1 + store i2 -1, i2* %33, align 1 + %34 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 0) + %36 = bitcast i8* %35 to i2* + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 1) + %38 = bitcast i8* %37 to i2* + store i2 -1, i2* %36, align 1 + store i2 1, i2* %38, align 1 + %__qsVar6__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 0) + %40 = bitcast i8* %39 to %Array** + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 1) + %42 = bitcast i8* %41 to %Array** + store %Array* %29, %Array** %40, align 8 + store %Array* %34, %Array** %42, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %43 = phi i64 [ 0, %continue__1 ], [ %48, %exiting__1 ] + %44 = icmp sle i64 %43, 1 + br i1 %44, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %43) + %46 = bitcast i8* %45 to %Array** + %47 = load %Array*, %Array** %46, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %48 = add i64 %43, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 1) + %__qsVar7__signs__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar7__signs__, i64 0) + %50 = bitcast i8* %49 to double* + %51 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar7__signs__, i64 1) + %52 = bitcast i8* %51 to double* + store double 1.000000e+00, double* %50, align 8 + store double -1.000000e+00, double* %52, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__signs__, i32 1) + %53 = call %Array* @Microsoft__Quantum__Arrays___3d2e1155dc4b4f3ab6fe59d3ebd7442c_Zipped__body(%Array* %__qsVar6__ops__, %Array* %__qsVar7__signs__) + %54 = call %Array* @Microsoft__Quantum__Arrays___3d2e1155dc4b4f3ab6fe59d3ebd7442c_Zipped__body(%Array* %__qsVar6__ops__, %Array* %__qsVar7__signs__) + %55 = call i64 @__quantum__rt__array_get_size_1d(%Array* %54) + %56 = sub i64 %55, 1 + %57 = insertvalue %Range zeroinitializer, i64 %56, 0 + %58 = insertvalue %Range %57, i64 -1, 1 + %59 = insertvalue %Range %58, i64 0, 2 + %60 = call %Array* @__quantum__rt__array_slice_1d(%Array* %53, %Range %59, i1 true) + %61 = call i64 @__quantum__rt__array_get_size_1d(%Array* %60) + %62 = sub i64 %61, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %63 = phi i64 [ 0, %exit__1 ], [ %76, %exiting__2 ] + %64 = icmp sle i64 %63, %62 + br i1 %64, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 %63) + %66 = bitcast i8* %65 to { %Array*, double }** + %67 = load { %Array*, double }*, { %Array*, double }** %66, align 8 + %68 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %67, i32 0, i32 0 + %__qsVar8__op__ = load %Array*, %Array** %68, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 1) + %69 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %67, i32 0, i32 1 + %__qsVar9__sign__ = load double, double* %69, align 8 + %70 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %__qsVar10__pauliString__ = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %70, %Array* %__qsVar2__idxFermions__, %Array* %__qsVar8__op__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 1) + %theta = fmul double %__qsVar9__sign__, %__qsVar5__angle__ + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__pauliString__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %71 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %72 = bitcast %Tuple* %71 to { %Array*, double, %Array* }* + %73 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %72, i32 0, i32 0 + %74 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %72, i32 0, i32 1 + %75 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %72, i32 0, i32 2 + store %Array* %__qsVar10__pauliString__, %Array** %73, align 8 + store double %theta, double* %74, align 8 + store %Array* %qubits, %Array** %75, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %72) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %71, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__pauliString__, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %76 = add i64 %63, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %77 = phi i64 [ 0, %exit__2 ], [ %82, %exiting__3 ] + %78 = icmp sle i64 %77, 1 + br i1 %78, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %77) + %80 = bitcast i8* %79 to %Array** + %81 = load %Array*, %Array** %80, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %82 = add i64 %77, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__signs__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %83 = phi i64 [ 0, %exit__3 ], [ %88, %exiting__4 ] + %84 = icmp sle i64 %83, 1 + br i1 %84, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %83) + %86 = bitcast i8* %85 to %Array** + %87 = load %Array*, %Array** %86, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %87, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %88 = add i64 %83, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar7__signs__, i32 -1) + %89 = call i64 @__quantum__rt__array_get_size_1d(%Array* %53) + %90 = sub i64 %89, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %91 = phi i64 [ 
0, %exit__4 ], [ %99, %exiting__5 ] + %92 = icmp sle i64 %91, %90 + br i1 %92, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 %91) + %94 = bitcast i8* %93 to { %Array*, double }** + %95 = load { %Array*, double }*, { %Array*, double }** %94, align 8 + %96 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %95, i32 0, i32 0 + %97 = load %Array*, %Array** %96, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %97, i32 -1) + %98 = bitcast { %Array*, double }* %95 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %98, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %99 = add i64 %91, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1) + %100 = sub i64 %55, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %101 = phi i64 [ 0, %exit__5 ], [ %109, %exiting__6 ] + %102 = icmp sle i64 %101, %100 + br i1 %102, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %54, i64 %101) + %104 = bitcast i8* %103 to { %Array*, double }** + %105 = load { %Array*, double }*, { %Array*, double }** %104, align 8 + %106 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %105, i32 0, i32 0 + %107 = load %Array*, %Array** %106, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %107, i32 -1) + %108 = bitcast { %Array*, double }* %105 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %108, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %109 = add i64 %101, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %54, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + ret void +} + +define internal { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorFunction____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %9 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorImpl____FunctionTable, 
[2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %13 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %12, i32 0, i32 1 + store %Callable* %10, %Callable** %13, align 8 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %14, align 8 + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__5__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__3__FunctionTable, %Tuple* %11) + %16 = call { %Callable* }* @Microsoft__Quantum__Simulation__EvolutionUnitary__body(%Callable* %15) + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret { %Callable* }* %16 +} + +define internal { %Callable* }* @Microsoft__Quantum__Simulation__EvolutionUnitary__body(%Callable* %__Item1__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable* }* + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + store %Callable* %__Item1__, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 -1) + ret { %Callable* }* %1 +} + +define internal void @Lifted__PartialApplication__5__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %2 = load { { %Array*, %Array* }*, 
%Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %4 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 0 + %5 = load double, double* %4, align 8 + %6 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %2, { { %Array*, %Array* }*, %Array* }** %10, align 8 + store double %5, double* %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %2 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %4 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 0 + %5 = load double, double* %4, align 8 + %6 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %2, { { %Array*, %Array* }*, %Array* }** %10, 
align 8 + store double %5, double* %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 1 + %7 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %6, align 8 + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %9 = load double, double* %8, align 8 + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %7, { { %Array*, %Array* }*, %Array* }** %14, align 8 + store double %9, double* %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* getelementptr ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { { 
%Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 1 + %7 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %6, align 8 + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %9 = load double, double* %8, align 8 + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %7, { { %Array*, %Array* }*, %Array* }** %14, align 8 + store double %9, double* %15, 
align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* getelementptr ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %23) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorImpl____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorImpl____body({ { %Array*, %Array* }*, %Array* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorImpl____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, 
%Array* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorImpl____adj({ { %Array*, %Array* }*, %Array* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorImpl____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Array*, %Array* }*, %Array* }*, double, %Array* }*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorImpl____ctl(%Array* %3, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorImpl____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Array*, %Array* }*, %Array* }*, double, %Array* }*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorImpl____ctladj(%Array* %3, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__3__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { { %Array*, %Array* 
}*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %3, align 8 + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 0 + %6 = load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 %count-change) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 %count-change) + %11 = bitcast { %Array*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 %count-change) + %12 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 %count-change) + %14 = bitcast { { %Array*, %Array* }*, %Array* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__3__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %3, align 8 + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 0 + %6 = load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 %count-change) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 %count-change) + %11 = bitcast { %Array*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 %count-change) + %12 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 %count-change) + %14 = bitcast { { %Array*, %Array* }*, %Array* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* 
%capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorImpl____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %idxDoubles = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxTermType, i64 0) + %8 = bitcast i8* %7 to i64* + %termType = load i64, i64* %8, align 4 + %9 = icmp eq i64 %termType, 0 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWignerClusterOperatorPQTerm____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +test1__1: ; preds = %entry + %10 = icmp eq i64 %termType, 2 + br i1 %10, label %then1__1, label %continue__1 + +then1__1: ; preds = %test1__1 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWignerClusterOperatorPQRSTerm____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +continue__1: ; preds = %then1__1, %test1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorImpl____adj({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %__qsVar1__idxDoubles__ = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__idxTermType__, i64 0) + %8 = bitcast i8* %7 to i64* + %__qsVar3__termType__ = load i64, i64* %8, align 4 + %9 = icmp eq i64 %__qsVar3__termType__, 0 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWignerClusterOperatorPQTerm____adj({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +test1__1: ; preds = %entry + %10 = icmp eq i64 %__qsVar3__termType__, 2 + br i1 %10, label %then1__1, label %continue__1 + +then1__1: ; preds = %test1__1 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWignerClusterOperatorPQRSTerm____adj({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +continue__1: ; preds = %then1__1, %test1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* 
%__qsVar2__idxFermions__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorImpl____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %idxDoubles = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxTermType, i64 0) + %12 = bitcast i8* %11 to i64* + %termType = load i64, i64* %12, align 4 + %13 = icmp eq i64 %termType, 0 + br i1 %13, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* 
}*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %16, align 8 + store double %stepSize, double* %17, align 8 + store %Array* %qubits, %Array** %18, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWignerClusterOperatorPQTerm____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %19 = icmp eq i64 %termType, 2 + br i1 %19, label %then1__1, label %continue__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %22 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %22, align 8 + store double %stepSize, double* %23, align 8 + store %Array* %qubits, %Array** %24, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWignerClusterOperatorPQRSTerm____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21) + call 
void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then1__1, %test1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorImpl____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__idxDoubles__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr 
inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__idxTermType__, i64 0) + %12 = bitcast i8* %11 to i64* + %__qsVar3__termType__ = load i64, i64* %12, align 4 + %13 = icmp eq i64 %__qsVar3__termType__, 0 + br i1 %13, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %16, align 8 + store double %stepSize, double* %17, align 8 + store %Array* %qubits, %Array** %18, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWignerClusterOperatorPQTerm____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %19 = icmp eq i64 %__qsVar3__termType__, 2 + br i1 %19, label %then1__1, label %continue__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %22 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %22, align 8 + store double %stepSize, double* %23, align 8 + store %Array* %qubits, %Array** %24, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWignerClusterOperatorPQRSTerm____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then1__1, %test1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + ret void +} + +declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) + +declare void @__quantum__rt__callable_make_adjoint(%Callable*) + +declare void @__quantum__rt__callable_make_controlled(%Callable*) + +define internal void 
@Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___MergeTwoRegisters_____body(%Callable* %oracle, i64 %nSystemQubits, %Array* %allQubits) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %allQubits, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %allQubits) + %1 = sub i64 %0, 1 + %2 = insertvalue %Range zeroinitializer, i64 %nSystemQubits, 0 + %3 = insertvalue %Range %2, i64 1, 1 + %4 = insertvalue %Range %3, i64 %1, 2 + %5 = call %Array* @__quantum__rt__array_slice_1d(%Array* %allQubits, %Range %4, i1 true) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + %6 = sub i64 %nSystemQubits, 1 + %7 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %6, 2 + %8 = call %Array* @__quantum__rt__array_slice_1d(%Array* %allQubits, %Range %7, i1 true) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 -1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Array*, %Array* }* + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %10, i32 0, i32 1 + store %Array* %5, %Array** %11, align 8 + store %Array* %8, %Array** %12, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %oracle, %Tuple* %9, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %allQubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___MergeTwoRegisters_____adj(%Callable* %oracle, i64 %nSystemQubits, %Array* %allQubits) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %allQubits, i32 1) + %0 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %0) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %allQubits) + %2 = sub i64 %1, 1 + %3 = insertvalue %Range zeroinitializer, i64 %nSystemQubits, 0 + %4 = insertvalue %Range %3, i64 1, 1 + %5 = insertvalue %Range %4, i64 %2, 2 + %6 = call %Array* @__quantum__rt__array_slice_1d(%Array* %allQubits, %Range %5, i1 true) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + %7 = sub i64 %nSystemQubits, 1 + %8 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %7, 2 + %9 = call %Array* 
@__quantum__rt__array_slice_1d(%Array* %allQubits, %Range %8, i1 true) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Array* }* + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 1 + store %Array* %6, %Array** %12, align 8 + store %Array* %9, %Array** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %0, %Tuple* %10, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %allQubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___MergeTwoRegisters_____ctl(%Array* %__controlQubits__, { %Callable*, i64, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0 + %oracle = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %2 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 1 + %nSystemQubits = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2 + %allQubits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %allQubits, i32 1) + %4 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %4) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %allQubits) + %6 = sub i64 %5, 1 + %7 = insertvalue %Range zeroinitializer, i64 %nSystemQubits, 0 + %8 = insertvalue %Range %7, i64 1, 1 + %9 = insertvalue %Range %8, i64 %6, 2 + %10 = call %Array* @__quantum__rt__array_slice_1d(%Array* %allQubits, %Range %9, i1 true) + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 -1) + %11 = sub i64 %nSystemQubits, 1 + %12 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %11, 2 + %13 = call %Array* @__quantum__rt__array_slice_1d(%Array* %allQubits, %Range %12, i1 true) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Array* }* + %16 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %15, i32 0, i32 1 + store %Array* %10, %Array** %16, align 8 + store %Array* %13, %Array** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { %Array*, %Array* }* }* + %20 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %19, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %20, align 8 + store { %Array*, %Array* }* %15, { %Array*, %Array* }** %21, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %18, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %allQubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___MergeTwoRegisters_____ctladj(%Array* %__controlQubits__, { %Callable*, i64, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0 + %oracle = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %2 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 1 + %nSystemQubits = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2 + %allQubits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %allQubits, i32 1) + %4 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + call void @__quantum__rt__callable_make_controlled(%Callable* %4) + call void 
@__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %allQubits) + %6 = sub i64 %5, 1 + %7 = insertvalue %Range zeroinitializer, i64 %nSystemQubits, 0 + %8 = insertvalue %Range %7, i64 1, 1 + %9 = insertvalue %Range %8, i64 %6, 2 + %10 = call %Array* @__quantum__rt__array_slice_1d(%Array* %allQubits, %Range %9, i1 true) + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 -1) + %11 = sub i64 %nSystemQubits, 1 + %12 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %11, 2 + %13 = call %Array* @__quantum__rt__array_slice_1d(%Array* %allQubits, %Range %12, i1 true) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Array* }* + %16 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %15, i32 0, i32 1 + store %Array* %10, %Array** %16, align 8 + store %Array* %13, %Array** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { %Array*, %Array* }* }* + %20 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %19, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %20, align 8 + store { %Array*, %Array* }* %15, { %Array*, %Array* }** %21, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %18, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %allQubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___PQandPQQRTermToPauliGenIdx_____body({ { %Array*, %Array* }*, %Array* }* %term) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, 
align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %coeff = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %8 = bitcast i8* %7 to double* + %9 = load double, double* %8, align 8 + %newCoeff = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoeff, i64 0) + %11 = bitcast i8* %10 to double* + store double %9, double* %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %newCoeff, i32 1) + %12 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions) + %13 = icmp eq i64 %12, 2 + br i1 %13, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %14 = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___PQTermToPauliGenIdx_____body({ { %Array*, %Array* }*, %Array* }* %term) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %newCoeff, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 -1) + ret %Array* %14 + +else__1: ; preds = %entry + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %16 = bitcast i8* %15 to i64* + %qubitPidx = load i64, i64* %16, align 4 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %18 = bitcast i8* %17 to i64* + %qubitQidx = load i64, i64* %18, align 4 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %20 = bitcast i8* %19 to i64* + %qubitRidx = load i64, i64* %20, align 4 + %21 = icmp slt i64 %qubitPidx, %qubitQidx + br i1 %21, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %else__1 + %22 = icmp slt i64 %qubitQidx, %qubitRidx + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %else__1 + %23 = phi i1 [ %22, %condTrue__1 ], [ %21, %else__1 ] + br i1 %23, label %then0__2, label %else__2 + +then0__2: 
; preds = %condContinue__1 + %24 = sub i64 %qubitQidx, 1 + %25 = insertvalue %Range zeroinitializer, i64 %qubitPidx, 0 + %26 = insertvalue %Range %25, i64 1, 1 + %27 = insertvalue %Range %26, i64 %24, 2 + %28 = call %Array* @Microsoft__Quantum__Convert__RangeAsIntArray__body(%Range %27) + %29 = add i64 %qubitQidx, 1 + %30 = insertvalue %Range zeroinitializer, i64 %29, 0 + %31 = insertvalue %Range %30, i64 1, 1 + %32 = insertvalue %Range %31, i64 %qubitRidx, 2 + %33 = call %Array* @Microsoft__Quantum__Convert__RangeAsIntArray__body(%Range %32) + %qubitIndices = call %Array* @__quantum__rt__array_concatenate(%Array* %28, %Array* %33) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitIndices, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + %34 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 0) + %36 = bitcast i8* %35 to i64* + store i64 1, i64* %36, align 4 + %37 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubitIndices) + %38 = sub i64 %37, 2 + %39 = call %Array* @Microsoft__Quantum__Arrays___776c59d7915545a6a81beb8cdb98d2a4_ConstantArray__body(i64 %38, i64 3) + %40 = call %Array* @__quantum__rt__array_concatenate(%Array* %34, %Array* %39) + call void @__quantum__rt__array_update_reference_count(%Array* %40, i32 1) + %41 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %41, i64 0) + %43 = bitcast i8* %42 to i64* + store i64 1, i64* %43, align 4 + %44 = call %Array* @__quantum__rt__array_concatenate(%Array* %40, %Array* %41) + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %39, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %40, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %40, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 1) + %45 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %46 = bitcast %Tuple* %45 to { %Array*, %Array* }* + %47 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %46, i32 0, i32 0 + %48 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %46, i32 0, i32 1 + store %Array* %44, %Array** %47, align 8 + store %Array* %newCoeff, %Array** %48, align 8 + %49 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %46, %Array* %qubitIndices) + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + %50 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 0) + %52 = bitcast i8* %51 to i64* + store i64 2, i64* %52, align 4 + %53 = sub i64 %37, 2 + %54 = call %Array* @Microsoft__Quantum__Arrays___776c59d7915545a6a81beb8cdb98d2a4_ConstantArray__body(i64 %53, i64 
3) + %55 = call %Array* @__quantum__rt__array_concatenate(%Array* %50, %Array* %54) + call void @__quantum__rt__array_update_reference_count(%Array* %55, i32 1) + %56 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 0) + %58 = bitcast i8* %57 to i64* + store i64 2, i64* %58, align 4 + %59 = call %Array* @__quantum__rt__array_concatenate(%Array* %55, %Array* %56) + call void @__quantum__rt__array_update_reference_count(%Array* %59, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %50, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %54, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %55, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %55, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %56, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %59, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 1) + %60 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %61 = bitcast %Tuple* %60 to { %Array*, %Array* }* + %62 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %61, i32 0, i32 0 + %63 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %61, i32 0, i32 1 + store %Array* %59, %Array** %62, align 8 + store %Array* %newCoeff, %Array** %63, align 8 + %64 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %61, %Array* %qubitIndices) + call void @__quantum__rt__array_update_reference_count(%Array* %59, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %60, i32 -1) + %65 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 0) + %67 = bitcast i8* %66 to { { %Array*, %Array* }*, %Array* }** + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 1) + %69 = bitcast i8* %68 to { { %Array*, %Array* }*, %Array* }** + store { { %Array*, %Array* }*, %Array* }* %49, { { %Array*, %Array* }*, %Array* }** %67, align 8 + store { { %Array*, %Array* }*, %Array* }* %64, { { %Array*, %Array* }*, %Array* }** %69, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %newCoeff, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %33, i32 
-1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitIndices, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitIndices, i32 -1) + ret %Array* %65 + +else__2: ; preds = %condContinue__1 + %70 = insertvalue %Range zeroinitializer, i64 %qubitPidx, 0 + %71 = insertvalue %Range %70, i64 1, 1 + %72 = insertvalue %Range %71, i64 %qubitRidx, 2 + %73 = call %Array* @Microsoft__Quantum__Convert__RangeAsIntArray__body(%Range %72) + %74 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 0) + %76 = bitcast i8* %75 to i64* + store i64 %qubitQidx, i64* %76, align 4 + %qubitIndices__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %73, %Array* %74) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitIndices__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices__1, i32 1) + %77 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 0) + %79 = bitcast i8* %78 to i64* + store i64 1, i64* %79, align 4 + %80 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubitIndices__1) + %81 = sub i64 %80, 3 + %82 = call %Array* @Microsoft__Quantum__Arrays___776c59d7915545a6a81beb8cdb98d2a4_ConstantArray__body(i64 %81, i64 3) + %83 = call %Array* @__quantum__rt__array_concatenate(%Array* %77, %Array* %82) + call void @__quantum__rt__array_update_reference_count(%Array* %83, i32 1) + %84 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %84, i64 0) + %86 = bitcast i8* %85 to i64* + %87 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %84, i64 1) + %88 = bitcast i8* %87 to i64* + store i64 1, i64* %86, align 4 + store i64 3, i64* %88, align 4 + %89 = call %Array* @__quantum__rt__array_concatenate(%Array* %83, %Array* %84) + call void @__quantum__rt__array_update_reference_count(%Array* %89, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %77, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %82, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %83, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %83, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %84, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %89, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 1) + %90 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %91 = bitcast %Tuple* %90 to { %Array*, %Array* }* + %92 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %91, i32 0, i32 0 + %93 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %91, i32 0, i32 1 + store %Array* %89, %Array** %92, align 8 + store %Array* %newCoeff, %Array** %93, align 8 + %94 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %91, %Array* %qubitIndices__1) + call void @__quantum__rt__array_update_reference_count(%Array* %89, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %90, i32 -1) + %95 = call %Array* 
@__quantum__rt__array_create_1d(i32 8, i64 1) + %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %95, i64 0) + %97 = bitcast i8* %96 to i64* + store i64 2, i64* %97, align 4 + %98 = sub i64 %80, 3 + %99 = call %Array* @Microsoft__Quantum__Arrays___776c59d7915545a6a81beb8cdb98d2a4_ConstantArray__body(i64 %98, i64 3) + %100 = call %Array* @__quantum__rt__array_concatenate(%Array* %95, %Array* %99) + call void @__quantum__rt__array_update_reference_count(%Array* %100, i32 1) + %101 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 0) + %103 = bitcast i8* %102 to i64* + %104 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 1) + %105 = bitcast i8* %104 to i64* + store i64 2, i64* %103, align 4 + store i64 3, i64* %105, align 4 + %106 = call %Array* @__quantum__rt__array_concatenate(%Array* %100, %Array* %101) + call void @__quantum__rt__array_update_reference_count(%Array* %106, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %95, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %99, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %100, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %100, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %101, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %106, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 1) + %107 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %108 = bitcast %Tuple* %107 to { %Array*, %Array* }* + %109 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %108, i32 0, i32 0 + %110 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %108, i32 0, i32 1 + store %Array* %106, %Array** %109, align 8 + store %Array* %newCoeff, %Array** %110, align 8 + %111 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %108, %Array* %qubitIndices__1) + call void @__quantum__rt__array_update_reference_count(%Array* %106, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %107, i32 -1) + %112 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %113 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %112, i64 0) + %114 = bitcast i8* %113 to { { %Array*, %Array* }*, %Array* }** + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %112, i64 1) + %116 = bitcast i8* %115 to { { %Array*, %Array* }*, %Array* }** + store { { %Array*, %Array* }*, %Array* }* %94, { { %Array*, %Array* }*, %Array* }** %114, align 8 + store { { %Array*, %Array* }*, %Array* }* %111, { { %Array*, %Array* }*, %Array* }** %116, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %newCoeff, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %73, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %74, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitIndices__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitIndices__1, i32 -1) + ret %Array* %112 + +continue__2: ; No predecessors! + unreachable + +continue__1: ; No predecessors! + unreachable +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___PQTermToPauliGenIdx_____body({ { %Array*, %Array* }*, %Array* }* %term) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %coeff = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %8 = bitcast i8* %7 to double* + %9 = load double, double* %8, align 8 + %newCoeff = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoeff, i64 0) + %11 = bitcast i8* %10 to double* + store double %9, double* %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %newCoeff, i32 1) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %13 = bitcast i8* %12 to i64* + %qubitPidx = load i64, i64* %13, align 4 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %15 = bitcast i8* %14 to i64* + %qubitQidx = load i64, i64* %15, align 4 + %16 = insertvalue %Range zeroinitializer, i64 %qubitPidx, 0 + %17 = insertvalue %Range %16, i64 1, 1 + %18 = insertvalue %Range %17, i64 %qubitQidx, 2 + %qubitIndices = call %Array* @Microsoft__Quantum__Convert__RangeAsIntArray__body(%Range %18) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + %19 = call %Array* 
@__quantum__rt__array_create_1d(i32 8, i64 1) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 0) + %21 = bitcast i8* %20 to i64* + store i64 1, i64* %21, align 4 + %22 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubitIndices) + %23 = sub i64 %22, 2 + %24 = call %Array* @Microsoft__Quantum__Arrays___776c59d7915545a6a81beb8cdb98d2a4_ConstantArray__body(i64 %23, i64 3) + %25 = call %Array* @__quantum__rt__array_concatenate(%Array* %19, %Array* %24) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 1) + %26 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %26, i64 0) + %28 = bitcast i8* %27 to i64* + store i64 1, i64* %28, align 4 + %29 = call %Array* @__quantum__rt__array_concatenate(%Array* %25, %Array* %26) + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %26, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 1) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %31 = bitcast %Tuple* %30 to { %Array*, %Array* }* + %32 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %31, i32 0, i32 0 + %33 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %31, i32 0, i32 1 + store %Array* %29, %Array** %32, align 8 + store %Array* %newCoeff, %Array** %33, align 8 + %34 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %31, %Array* %qubitIndices) + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + %35 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %35, i64 0) + %37 = bitcast i8* %36 to i64* + store i64 2, i64* %37, align 4 + %38 = sub i64 %22, 2 + %39 = call %Array* @Microsoft__Quantum__Arrays___776c59d7915545a6a81beb8cdb98d2a4_ConstantArray__body(i64 %38, i64 3) + %40 = call %Array* @__quantum__rt__array_concatenate(%Array* %35, %Array* %39) + call void @__quantum__rt__array_update_reference_count(%Array* %40, i32 1) + %41 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %41, i64 0) + %43 = bitcast i8* %42 to i64* + store i64 2, i64* %43, align 4 + %44 = call %Array* @__quantum__rt__array_concatenate(%Array* %40, %Array* %41) + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %39, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %40, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* 
%40, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 1) + %45 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %46 = bitcast %Tuple* %45 to { %Array*, %Array* }* + %47 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %46, i32 0, i32 0 + %48 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %46, i32 0, i32 1 + store %Array* %44, %Array** %47, align 8 + store %Array* %newCoeff, %Array** %48, align 8 + %49 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %46, %Array* %qubitIndices) + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + %50 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 0) + %52 = bitcast i8* %51 to { { %Array*, %Array* }*, %Array* }** + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 1) + %54 = bitcast i8* %53 to { { %Array*, %Array* }*, %Array* }** + store { { %Array*, %Array* }*, %Array* }* %34, { { %Array*, %Array* }*, %Array* }** %52, align 8 + store { { %Array*, %Array* }*, %Array* }* %49, { { %Array*, %Array* }*, %Array* }** %54, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %newCoeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitIndices, i32 -1) + ret %Array* %50 +} + +define internal %Array* @Microsoft__Quantum__Convert__RangeAsIntArray__body(%Range %range) { +entry: + %array = alloca %Array*, align 8 + %start = extractvalue %Range %range, 0 + %0 = extractvalue %Range %range, 1 + %1 = extractvalue %Range %range, 2 + %2 = extractvalue %Range %range, 0 + %step = extractvalue %Range %range, 1 + %3 = extractvalue %Range %range, 2 + %4 = extractvalue %Range %range, 0 + %5 = extractvalue %Range %range, 1 + %end = extractvalue %Range %range, 2 + %6 = sub i64 %end, %start + %7 = sitofp i64 %6 to double + %8 = sitofp i64 %step to double + %9 = fdiv double %7, %8 + %10 = fcmp oge double %9, 0.000000e+00 + br i1 %10, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %11 = sub i64 %end, %start + %12 = sdiv i64 %11, %step + %nTerms = add i64 %12, 1 + %13 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nTerms) 
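+  ; (annotation, not part of the compiler-emitted QIR) RangeAsIntArray lowering:
+  ; the entry block tests (end - start) / step >= 0 and else__1 returns an empty
+  ; array for an empty range; here then0__1 has computed nTerms = (end - start)
+  ; / step + 1 and allocated the result, header__1 zero-initialises each slot,
+  ; and header__2 rewrites slot idx with start + idx * step through
+  ; __quantum__rt__array_copy, which only duplicates the array while its alias
+  ; count marks it as shared (copy-on-write).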
+ %14 = sub i64 %nTerms, 1 + br label %header__1 + +else__1: ; preds = %entry + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + ret %Array* %15 + +continue__1: ; No predecessors! + unreachable + +header__1: ; preds = %exiting__1, %then0__1 + %16 = phi i64 [ 0, %then0__1 ], [ %20, %exiting__1 ] + %17 = icmp sle i64 %16, %14 + br i1 %17, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 %16) + %19 = bitcast i8* %18 to i64* + store i64 0, i64* %19, align 4 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %16, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %13, %Array** %array, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %21 = sub i64 %nTerms, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idx = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %22 = icmp sle i64 %idx, %21 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = load %Array*, %Array** %array, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + %24 = call %Array* @__quantum__rt__array_copy(%Array* %23, i1 false) + %25 = mul i64 %idx, %step + %26 = add i64 %start, %25 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %24, i64 %idx) + %28 = bitcast i8* %27 to i64* + store i64 %26, i64* %28, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %24, i32 1) + store %Array* %24, %Array** %array, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %idx, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %30 = load %Array*, %Array** %array, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %30, i32 -1) + ret %Array* %30 +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %0, %Array* %__Item3__) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__Item3__, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, %Array* }*, %Array* }* getelementptr ({ { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { { %Array*, %Array* }*, %Array* }* + %3 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %2, i32 0, i32 1 + store { %Array*, %Array* }* %0, { %Array*, %Array* }** %3, align 8 + store %Array* %__Item3__, %Array** %4, align 8 + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 1) + %9 = bitcast { %Array*, %Array* }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__Item3__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__Item3__, i32 -1) + ret { { %Array*, 
%Array* }*, %Array* }* %2 +} + +define internal %Array* @Microsoft__Quantum__Arrays___776c59d7915545a6a81beb8cdb98d2a4_ConstantArray__body(i64 %length, i64 %value) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %1 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2) + %5 = bitcast i8* %4 to i64* + store i64 %value, i64* %5, align 4 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + ret %Array* %0 +} + +define internal %Callable* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___PrepareSingleConfigurationalStateSingleSiteOccupation____body(%Array* %qubitIndices) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitIndices, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Array* }* + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %2, i32 0, i32 1 + store %Callable* %0, %Callable** %3, align 8 + store %Array* %qubitIndices, %Array** %4, align 8 + %5 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__6__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__4__FunctionTable, %Tuple* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + ret %Callable* %5 +} + +define internal void @Lifted__PartialApplication__6__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 
-1) + ret void +} + +define internal void @Lifted__PartialApplication__6__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = 
getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__adj(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctl(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctladj(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__4__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { 
%Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__4__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body(%Array* %qubitIndices, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %qubitIndices, %Array* %qubits) + call void @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__body(%Callable* %0, %Array* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__adj(%Array* %qubitIndices, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %qubitIndices, %Array* %qubits) + call void @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__adj(%Callable* %0, %Array* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + ret void +} + +define 
internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctl(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %qubitIndices = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %qubitIndices, %Array* %qubits) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + store %Callable* %3, %Callable** %7, align 8 + store %Array* %4, %Array** %8, align 8 + call void @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %6) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctladj(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %qubitIndices = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %qubitIndices, %Array* %qubits) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { 
%Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + store %Callable* %3, %Callable** %7, align 8 + store %Array* %4, %Array** %8, align 8 + call void @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Array* }* %6) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal { { i64, i64 }*, { double, %Callable* }* }* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___QubitizationOracleSeperatedRegisters____body({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData) { +entry: + %0 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i32 0, i32 1 + %data = load { %Array*, %Array*, %Array*, %Array* }*, { %Array*, %Array*, %Array*, %Array* }** %0, align 8 + %1 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %2) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %5) + %8 = bitcast i8* %7 to { %Array*, %Array* }** + %9 = load { %Array*, %Array* }*, { %Array*, %Array* }** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array*, %Array* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call i64 @__quantum__rt__array_get_size_1d(%Array* %17) + %19 = sub i64 %18, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %20) + %23 = bitcast i8* %22 to { %Array*, %Array* }** + %24 = load { %Array*, %Array* }*, { %Array*, %Array* }** %23, align 8 + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { %Array*, %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %31 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 2 + %32 = load %Array*, %Array** %31, align 8 + %33 = call i64 @__quantum__rt__array_get_size_1d(%Array* %32) + %34 = sub i64 %33, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %45, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %35) + %38 = bitcast i8* %37 to { %Array*, %Array* }** + %39 = load { %Array*, %Array* }*, { %Array*, %Array* }** %38, align 8 + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 0 + %41 = load %Array*, %Array** %40, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1) + %42 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 1 + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + %44 = bitcast { %Array*, %Array* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %44, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %45 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %46 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 3 + %47 = load %Array*, %Array** %46, align 8 + %48 = call i64 @__quantum__rt__array_get_size_1d(%Array* %47) + %49 = sub i64 %48, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %50 = phi i64 [ 0, %exit__3 ], [ %60, %exiting__4 ] + %51 = icmp sle i64 %50, %49 + br i1 %51, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %50) + %53 = bitcast i8* %52 to { %Array*, %Array* }** + %54 = load { %Array*, %Array* }*, { %Array*, %Array* }** %53, align 8 + %55 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 0 + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + %57 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 1 + %58 = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 1) + %59 = bitcast { %Array*, %Array* }* %54 to %Tuple* + 
call void @__quantum__rt__tuple_update_alias_count(%Tuple* %59, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %60 = add i64 %50, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + %61 = bitcast { %Array*, %Array*, %Array*, %Array* }* %data to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %62 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i32 0, i32 2 + %statePrepData = load { i64, %Array* }*, { i64, %Array* }** %62, align 8 + %63 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %statePrepData, i32 0, i32 1 + %64 = load %Array*, %Array** %63, align 8 + %65 = call i64 @__quantum__rt__array_get_size_1d(%Array* %64) + %66 = sub i64 %65, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %67 = phi i64 [ 0, %exit__4 ], [ %78, %exiting__5 ] + %68 = icmp sle i64 %67, %66 + br i1 %68, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %67) + %70 = bitcast i8* %69 to { { double, double }*, %Array* }** + %71 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %70, align 8 + %72 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %71, i32 0, i32 0 + %73 = load { double, double }*, { double, double }** %72, align 8 + %74 = bitcast { double, double }* %73 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %74, i32 1) + %75 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %71, i32 0, i32 1 + %76 = load %Array*, %Array** %75, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 1) + %77 = bitcast { { double, double }*, %Array* }* %71 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %77, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %78 = add i64 %67, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + %79 = bitcast { i64, %Array* }* %statePrepData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + %80 = bitcast { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 1) + %81 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i32 0, i32 0 + %nSpinOrbitals = load i64, i64* %81, align 4 + %82 = sub i64 %3, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %83 = phi i64 [ 0, %exit__5 ], [ %93, %exiting__6 ] + %84 = icmp sle i64 %83, %82 + br i1 %84, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %83) + %86 = bitcast i8* %85 to { %Array*, %Array* }** + %87 = load { %Array*, %Array* }*, { %Array*, %Array* }** %86, align 8 + %88 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %87, i32 0, i32 0 + %89 = load %Array*, %Array** %88, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %89, i32 1) + %90 = 
getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %87, i32 0, i32 1 + %91 = load %Array*, %Array** %90, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %91, i32 1) + %92 = bitcast { %Array*, %Array* }* %87 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %92, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %93 = add i64 %83, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %94 = sub i64 %18, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %95 = phi i64 [ 0, %exit__6 ], [ %105, %exiting__7 ] + %96 = icmp sle i64 %95, %94 + br i1 %96, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %95) + %98 = bitcast i8* %97 to { %Array*, %Array* }** + %99 = load { %Array*, %Array* }*, { %Array*, %Array* }** %98, align 8 + %100 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %99, i32 0, i32 0 + %101 = load %Array*, %Array** %100, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %101, i32 1) + %102 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %99, i32 0, i32 1 + %103 = load %Array*, %Array** %102, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %103, i32 1) + %104 = bitcast { %Array*, %Array* }* %99 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %104, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %105 = add i64 %95, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %106 = sub i64 %33, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %107 = phi i64 [ 0, %exit__7 ], [ %117, %exiting__8 ] + %108 = icmp sle i64 %107, %106 + br i1 %108, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %107) + %110 = bitcast i8* %109 to { %Array*, %Array* }** + %111 = load { %Array*, %Array* }*, { %Array*, %Array* }** %110, align 8 + %112 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %111, i32 0, i32 0 + %113 = load %Array*, %Array** %112, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %113, i32 1) + %114 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %111, i32 0, i32 1 + %115 = load %Array*, %Array** %114, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %115, i32 1) + %116 = bitcast { %Array*, %Array* }* %111 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %116, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %117 = add i64 %107, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %118 = sub i64 %48, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %119 = phi i64 [ 0, %exit__8 ], [ %129, %exiting__9 ] + %120 = icmp sle i64 %119, %118 + br i1 %120, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %119) + %122 = bitcast i8* %121 to { %Array*, %Array* }** + %123 = load { %Array*, %Array* }*, { %Array*, %Array* }** %122, align 8 + %124 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 0 + 
%125 = load %Array*, %Array** %124, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %125, i32 1) + %126 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 1 + %127 = load %Array*, %Array** %126, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %127, i32 1) + %128 = bitcast { %Array*, %Array* }* %123 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %128, i32 1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %129 = add i64 %119, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %130 = sub i64 %65, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %131 = phi i64 [ 0, %exit__9 ], [ %142, %exiting__10 ] + %132 = icmp sle i64 %131, %130 + br i1 %132, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %131) + %134 = bitcast i8* %133 to { { double, double }*, %Array* }** + %135 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %134, align 8 + %136 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %135, i32 0, i32 0 + %137 = load { double, double }*, { double, double }** %136, align 8 + %138 = bitcast { double, double }* %137 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %138, i32 1) + %139 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %135, i32 0, i32 1 + %140 = load %Array*, %Array** %139, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %140, i32 1) + %141 = bitcast { { double, double }*, %Array* }* %135 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %141, i32 1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %142 = add i64 %131, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + %143 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %qSharpData, i32 0, i32 3 + %energyShift = load double, double* %143, align 8 + %generatorSystem = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerBlockEncodingGeneratorSystem__body({ %Array*, %Array*, %Array*, %Array* }* %data) + %144 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %genIdxFunction = load %Callable*, %Callable** %144, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %genIdxFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %genIdxFunction, i32 1) + %145 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %145, i32 1) + %146 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %146, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %genIdxFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %genIdxFunction, i32 1) + %147 = call { double, { { %Callable* }* }* 
}* @Microsoft__Quantum__Simulation__PauliBlockEncoding__body({ i64, %Callable* }* %generatorSystem) + %148 = getelementptr inbounds { double, { { %Callable* }* }* }, { double, { { %Callable* }* }* }* %147, i32 0, i32 0 + %oneNorm = load double, double* %148, align 8 + %149 = getelementptr inbounds { double, { { %Callable* }* }* }, { double, { { %Callable* }* }* }* %147, i32 0, i32 1 + %blockEncodingReflection = load { { %Callable* }* }*, { { %Callable* }* }** %149, align 8 + %150 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %blockEncodingReflection, i32 0, i32 0 + %151 = load { %Callable* }*, { %Callable* }** %150, align 8 + %152 = getelementptr inbounds { %Callable* }, { %Callable* }* %151, i32 0, i32 0 + %153 = load %Callable*, %Callable** %152, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %153, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %153, i32 1) + %154 = bitcast { %Callable* }* %151 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %154, i32 1) + %155 = bitcast { { %Callable* }* }* %blockEncodingReflection to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %155, i32 1) + %156 = sitofp i64 %nTerms to double + %157 = call double @Microsoft__Quantum__Math__Lg__body(double %156) + %nCtrlRegisterQubits = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %157) + %158 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64 }* getelementptr ({ i64, i64 }, { i64, i64 }* null, i32 1) to i64)) + %159 = bitcast %Tuple* %158 to { i64, i64 }* + %160 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %159, i32 0, i32 0 + %161 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %159, i32 0, i32 1 + store i64 %nCtrlRegisterQubits, i64* %160, align 4 + store i64 %nSpinOrbitals, i64* %161, align 4 + %162 = call %Callable* @Microsoft__Quantum__Simulation__QuantumWalkByQubitization__body({ { %Callable* }* }* %blockEncodingReflection) + %163 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Callable* }* getelementptr ({ double, %Callable* }, { double, %Callable* }* null, i32 1) to i64)) + %164 = bitcast %Tuple* %163 to { double, %Callable* }* + %165 = getelementptr inbounds { double, %Callable* }, { double, %Callable* }* %164, i32 0, i32 0 + %166 = getelementptr inbounds { double, %Callable* }, { double, %Callable* }* %164, i32 0, i32 1 + store double %oneNorm, double* %165, align 8 + store %Callable* %162, %Callable** %166, align 8 + %167 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, i64 }*, { double, %Callable* }* }* getelementptr ({ { i64, i64 }*, { double, %Callable* }* }, { { i64, i64 }*, { double, %Callable* }* }* null, i32 1) to i64)) + %168 = bitcast %Tuple* %167 to { { i64, i64 }*, { double, %Callable* }* }* + %169 = getelementptr inbounds { { i64, i64 }*, { double, %Callable* }* }, { { i64, i64 }*, { double, %Callable* }* }* %168, i32 0, i32 0 + %170 = getelementptr inbounds { { i64, i64 }*, { double, %Callable* }* }, { { i64, i64 }*, { double, %Callable* }* }* %168, i32 0, i32 1 + store { i64, i64 }* %159, { i64, i64 }** %169, align 8 + store { double, %Callable* }* %164, { double, %Callable* }** %170, align 8 + %171 = sub i64 %3, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %172 = phi i64 [ 0, %exit__10 ], [ %182, %exiting__11 ] + %173 = icmp sle i64 %172, %171 + br i1 %173, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %174 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %172) + %175 = bitcast i8* %174 to { %Array*, %Array* }** + %176 = load { %Array*, %Array* }*, { %Array*, %Array* }** %175, align 8 + %177 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %176, i32 0, i32 0 + %178 = load %Array*, %Array** %177, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %178, i32 -1) + %179 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %176, i32 0, i32 1 + %180 = load %Array*, %Array** %179, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %180, i32 -1) + %181 = bitcast { %Array*, %Array* }* %176 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %181, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %182 = add i64 %172, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %183 = sub i64 %18, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %184 = phi i64 [ 0, %exit__11 ], [ %194, %exiting__12 ] + %185 = icmp sle i64 %184, %183 + br i1 %185, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %186 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %184) + %187 = bitcast i8* %186 to { %Array*, %Array* }** + %188 = load { %Array*, %Array* }*, { %Array*, %Array* }** %187, align 8 + %189 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %188, i32 0, i32 0 + %190 = load %Array*, %Array** %189, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %190, i32 -1) + %191 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %188, i32 0, i32 1 + %192 = load %Array*, %Array** %191, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %192, i32 -1) + %193 = bitcast { %Array*, %Array* }* %188 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %193, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %194 = add i64 %184, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %195 = sub i64 %33, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %196 = phi i64 [ 0, %exit__12 ], [ %206, %exiting__13 ] + %197 = icmp sle i64 %196, %195 + br i1 %197, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %198 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %196) + %199 = bitcast i8* %198 to { %Array*, %Array* }** + %200 = load { %Array*, %Array* }*, { %Array*, %Array* }** %199, align 8 + %201 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %200, i32 0, i32 0 + %202 = load %Array*, %Array** %201, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %202, i32 -1) + %203 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %200, i32 0, i32 1 + %204 = load %Array*, %Array** %203, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %204, i32 -1) + %205 = bitcast { %Array*, %Array* }* %200 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %205, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %206 = add i64 %196, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %207 = sub i64 %48, 1 + br label %header__14 + +header__14: ; preds = 
%exiting__14, %exit__13 + %208 = phi i64 [ 0, %exit__13 ], [ %218, %exiting__14 ] + %209 = icmp sle i64 %208, %207 + br i1 %209, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %210 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %208) + %211 = bitcast i8* %210 to { %Array*, %Array* }** + %212 = load { %Array*, %Array* }*, { %Array*, %Array* }** %211, align 8 + %213 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %212, i32 0, i32 0 + %214 = load %Array*, %Array** %213, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %214, i32 -1) + %215 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %212, i32 0, i32 1 + %216 = load %Array*, %Array** %215, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %216, i32 -1) + %217 = bitcast { %Array*, %Array* }* %212 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %217, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %218 = add i64 %208, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %219 = sub i64 %65, 1 + br label %header__15 + +header__15: ; preds = %exiting__15, %exit__14 + %220 = phi i64 [ 0, %exit__14 ], [ %231, %exiting__15 ] + %221 = icmp sle i64 %220, %219 + br i1 %221, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %222 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %220) + %223 = bitcast i8* %222 to { { double, double }*, %Array* }** + %224 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %223, align 8 + %225 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %224, i32 0, i32 0 + %226 = load { double, double }*, { double, double }** %225, align 8 + %227 = bitcast { double, double }* %226 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %227, i32 -1) + %228 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %224, i32 0, i32 1 + %229 = load %Array*, %Array** %228, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %229, i32 -1) + %230 = bitcast { { double, double }*, %Array* }* %224 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %230, i32 -1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %231 = add i64 %220, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 -1) + %232 = sub i64 %3, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %233 = phi i64 [ 0, %exit__15 ], [ %243, %exiting__16 ] + %234 = icmp sle i64 %233, %232 + br i1 %234, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %235 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %233) + %236 = bitcast i8* %235 to { %Array*, %Array* }** + %237 = load { %Array*, %Array* }*, { %Array*, %Array* }** %236, align 8 + %238 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %237, i32 0, i32 0 + %239 = load %Array*, %Array** %238, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %239, i32 -1) + %240 = getelementptr 
inbounds { %Array*, %Array* }, { %Array*, %Array* }* %237, i32 0, i32 1 + %241 = load %Array*, %Array** %240, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %241, i32 -1) + %242 = bitcast { %Array*, %Array* }* %237 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %242, i32 -1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %243 = add i64 %233, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %244 = sub i64 %18, 1 + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %245 = phi i64 [ 0, %exit__16 ], [ %255, %exiting__17 ] + %246 = icmp sle i64 %245, %244 + br i1 %246, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %247 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %245) + %248 = bitcast i8* %247 to { %Array*, %Array* }** + %249 = load { %Array*, %Array* }*, { %Array*, %Array* }** %248, align 8 + %250 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %249, i32 0, i32 0 + %251 = load %Array*, %Array** %250, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %251, i32 -1) + %252 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %249, i32 0, i32 1 + %253 = load %Array*, %Array** %252, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %253, i32 -1) + %254 = bitcast { %Array*, %Array* }* %249 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %254, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %255 = add i64 %245, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %256 = sub i64 %33, 1 + br label %header__18 + +header__18: ; preds = %exiting__18, %exit__17 + %257 = phi i64 [ 0, %exit__17 ], [ %267, %exiting__18 ] + %258 = icmp sle i64 %257, %256 + br i1 %258, label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %259 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %257) + %260 = bitcast i8* %259 to { %Array*, %Array* }** + %261 = load { %Array*, %Array* }*, { %Array*, %Array* }** %260, align 8 + %262 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %261, i32 0, i32 0 + %263 = load %Array*, %Array** %262, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %263, i32 -1) + %264 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %261, i32 0, i32 1 + %265 = load %Array*, %Array** %264, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %265, i32 -1) + %266 = bitcast { %Array*, %Array* }* %261 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %266, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = %body__18 + %267 = add i64 %257, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %268 = sub i64 %48, 1 + br label %header__19 + +header__19: ; preds = %exiting__19, %exit__18 + %269 = phi i64 [ 0, %exit__18 ], [ %279, %exiting__19 ] + %270 = icmp sle i64 %269, %268 + br i1 %270, label %body__19, label %exit__19 + +body__19: ; preds = %header__19 + %271 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %269) + %272 = bitcast i8* %271 to { %Array*, %Array* }** + %273 = load { %Array*, %Array* }*, { %Array*, %Array* }** %272, align 8 + %274 = getelementptr 
inbounds { %Array*, %Array* }, { %Array*, %Array* }* %273, i32 0, i32 0 + %275 = load %Array*, %Array** %274, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %275, i32 -1) + %276 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %273, i32 0, i32 1 + %277 = load %Array*, %Array** %276, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %277, i32 -1) + %278 = bitcast { %Array*, %Array* }* %273 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %278, i32 -1) + br label %exiting__19 + +exiting__19: ; preds = %body__19 + %279 = add i64 %269, 1 + br label %header__19 + +exit__19: ; preds = %header__19 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %280 = sub i64 %65, 1 + br label %header__20 + +header__20: ; preds = %exiting__20, %exit__19 + %281 = phi i64 [ 0, %exit__19 ], [ %292, %exiting__20 ] + %282 = icmp sle i64 %281, %280 + br i1 %282, label %body__20, label %exit__20 + +body__20: ; preds = %header__20 + %283 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %281) + %284 = bitcast i8* %283 to { { double, double }*, %Array* }** + %285 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %284, align 8 + %286 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %285, i32 0, i32 0 + %287 = load { double, double }*, { double, double }** %286, align 8 + %288 = bitcast { double, double }* %287 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %288, i32 -1) + %289 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %285, i32 0, i32 1 + %290 = load %Array*, %Array** %289, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %290, i32 -1) + %291 = bitcast { { double, double }*, %Array* }* %285 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %291, i32 -1) + br label %exiting__20 + +exiting__20: ; preds = %body__20 + %292 = add i64 %281, 1 + br label %header__20 + +exit__20: ; preds = %header__20 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %genIdxFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %genIdxFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %145, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %genIdxFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %genIdxFunction, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %153, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %153, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %154, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %155, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %genIdxFunction, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %genIdxFunction, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %145, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %153, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %153, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %154, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %155, i32 -1) + %293 = bitcast { double, { { %Callable* }* }* }* %147 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %293, i32 -1) + ret { { i64, i64 }*, { double, %Callable* }* }* %168 +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerBlockEncodingGeneratorSystem__body({ %Array*, %Array*, %Array*, %Array* }* %data) { +entry: + %finalIdx = alloca i64, align 8 + %startIdx = alloca i64, align 8 + %genIdxes = alloca %Array*, align 8 + %0 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 0 + %ZData = load %Array*, %Array** %0, align 8 + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ZData) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %3) + %6 = bitcast i8* %5 to { %Array*, %Array* }** + %7 = load { %Array*, %Array* }*, { %Array*, %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { %Array*, %Array* }* %7 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 1) + %14 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 1 + %ZZData = load %Array*, %Array** %14, align 8 + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ZZData) + %16 = sub i64 %15, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %17 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %17) + %20 = bitcast i8* %19 to { %Array*, %Array* }** + %21 = load { %Array*, %Array* }*, { %Array*, %Array* }** %20, align 8 + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 0 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %24 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 1 + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 1) + %26 = bitcast { %Array*, %Array* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %17, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 1) + %28 = 
getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 2 + %PQandPQQRData = load %Array*, %Array** %28, align 8 + %29 = call i64 @__quantum__rt__array_get_size_1d(%Array* %PQandPQQRData) + %30 = sub i64 %29, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %31 = phi i64 [ 0, %exit__2 ], [ %41, %exiting__3 ] + %32 = icmp sle i64 %31, %30 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %31) + %34 = bitcast i8* %33 to { %Array*, %Array* }** + %35 = load { %Array*, %Array* }*, { %Array*, %Array* }** %34, align 8 + %36 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %35, i32 0, i32 0 + %37 = load %Array*, %Array** %36, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %37, i32 1) + %38 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %35, i32 0, i32 1 + %39 = load %Array*, %Array** %38, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %39, i32 1) + %40 = bitcast { %Array*, %Array* }* %35 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %41 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %PQandPQQRData, i32 1) + %42 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 3 + %h0123Data = load %Array*, %Array** %42, align 8 + %43 = call i64 @__quantum__rt__array_get_size_1d(%Array* %h0123Data) + %44 = sub i64 %43, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %45 = phi i64 [ 0, %exit__3 ], [ %55, %exiting__4 ] + %46 = icmp sle i64 %45, %44 + br i1 %46, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %45) + %48 = bitcast i8* %47 to { %Array*, %Array* }** + %49 = load { %Array*, %Array* }*, { %Array*, %Array* }** %48, align 8 + %50 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %49, i32 0, i32 0 + %51 = load %Array*, %Array** %50, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %51, i32 1) + %52 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %49, i32 0, i32 1 + %53 = load %Array*, %Array** %52, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %53, i32 1) + %54 = bitcast { %Array*, %Array* }* %49 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %54, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %55 = add i64 %45, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 1) + %56 = bitcast { %Array*, %Array*, %Array*, %Array* }* %data to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 1) + %57 = sub i64 %1, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %58 = phi i64 [ 0, %exit__4 ], [ %68, %exiting__5 ] + %59 = icmp sle i64 %58, %57 + br i1 %59, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %58) + %61 = bitcast i8* %60 to { %Array*, %Array* }** + %62 = load { %Array*, %Array* }*, { %Array*, %Array* }** %61, 
align 8 + %63 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %62, i32 0, i32 0 + %64 = load %Array*, %Array** %63, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + %65 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %62, i32 0, i32 1 + %66 = load %Array*, %Array** %65, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 1) + %67 = bitcast { %Array*, %Array* }* %62 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %67, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %68 = add i64 %58, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 1) + %69 = sub i64 %15, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %70 = phi i64 [ 0, %exit__5 ], [ %80, %exiting__6 ] + %71 = icmp sle i64 %70, %69 + br i1 %71, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %70) + %73 = bitcast i8* %72 to { %Array*, %Array* }** + %74 = load { %Array*, %Array* }*, { %Array*, %Array* }** %73, align 8 + %75 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %74, i32 0, i32 0 + %76 = load %Array*, %Array** %75, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 1) + %77 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %74, i32 0, i32 1 + %78 = load %Array*, %Array** %77, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %78, i32 1) + %79 = bitcast { %Array*, %Array* }* %74 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %80 = add i64 %70, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 1) + %81 = sub i64 %29, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %82 = phi i64 [ 0, %exit__6 ], [ %92, %exiting__7 ] + %83 = icmp sle i64 %82, %81 + br i1 %83, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %82) + %85 = bitcast i8* %84 to { %Array*, %Array* }** + %86 = load { %Array*, %Array* }*, { %Array*, %Array* }** %85, align 8 + %87 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %86, i32 0, i32 0 + %88 = load %Array*, %Array** %87, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %88, i32 1) + %89 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %86, i32 0, i32 1 + %90 = load %Array*, %Array** %89, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %90, i32 1) + %91 = bitcast { %Array*, %Array* }* %86 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %91, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %92 = add i64 %82, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %PQandPQQRData, i32 1) + %93 = sub i64 %43, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %94 = phi i64 [ 0, %exit__7 ], [ %104, %exiting__8 ] + %95 = icmp sle i64 %94, %93 + br i1 %95, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %94) + 
%97 = bitcast i8* %96 to { %Array*, %Array* }** + %98 = load { %Array*, %Array* }*, { %Array*, %Array* }** %97, align 8 + %99 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %98, i32 0, i32 0 + %100 = load %Array*, %Array** %99, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %100, i32 1) + %101 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %98, i32 0, i32 1 + %102 = load %Array*, %Array** %101, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %102, i32 1) + %103 = bitcast { %Array*, %Array* }* %98 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %103, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %104 = add i64 %94, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 1) + %105 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %106 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %107 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %108 = bitcast %Tuple* %107 to { %Array*, %Array* }* + %109 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %108, i32 0, i32 0 + %110 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %108, i32 0, i32 1 + store %Array* %105, %Array** %109, align 8 + store %Array* %106, %Array** %110, align 8 + %111 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %112 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, %Array* }*, %Array* }* getelementptr ({ { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* null, i32 1) to i64)) + %113 = bitcast %Tuple* %112 to { { %Array*, %Array* }*, %Array* }* + %114 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %113, i32 0, i32 0 + %115 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %113, i32 0, i32 1 + store { %Array*, %Array* }* %108, { %Array*, %Array* }** %114, align 8 + store %Array* %111, %Array** %115, align 8 + %116 = add i64 %1, %15 + %117 = mul i64 2, %29 + %118 = add i64 %116, %117 + %119 = mul i64 8, %43 + %120 = add i64 %118, %119 + %121 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %120) + %122 = sub i64 %120, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %123 = phi i64 [ 0, %exit__8 ], [ %134, %exiting__9 ] + %124 = icmp sle i64 %123, %122 + br i1 %124, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %121, i64 %123) + %126 = bitcast i8* %125 to { { %Array*, %Array* }*, %Array* }** + store { { %Array*, %Array* }*, %Array* }* %113, { { %Array*, %Array* }*, %Array* }** %126, align 8 + %127 = load { %Array*, %Array* }*, { %Array*, %Array* }** %114, align 8 + %128 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %127, i32 0, i32 0 + %129 = load %Array*, %Array** %128, align 8 + %130 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %127, i32 0, i32 1 + %131 = load %Array*, %Array** %130, align 8 + %132 = load %Array*, %Array** %115, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %129, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %131, i32 1) + %133 = bitcast { %Array*, %Array* }* %127 to 
%Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %133, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %132, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %112, i32 1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %134 = add i64 %123, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + store %Array* %121, %Array** %genIdxes, align 8 + %135 = sub i64 %120, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %136 = phi i64 [ 0, %exit__9 ], [ %151, %exiting__10 ] + %137 = icmp sle i64 %136, %135 + br i1 %137, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %138 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %121, i64 %136) + %139 = bitcast i8* %138 to { { %Array*, %Array* }*, %Array* }** + %140 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %139, align 8 + %141 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %140, i32 0, i32 0 + %142 = load { %Array*, %Array* }*, { %Array*, %Array* }** %141, align 8 + %143 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %142, i32 0, i32 0 + %144 = load %Array*, %Array** %143, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %144, i32 1) + %145 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %142, i32 0, i32 1 + %146 = load %Array*, %Array** %145, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %146, i32 1) + %147 = bitcast { %Array*, %Array* }* %142 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %147, i32 1) + %148 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %140, i32 0, i32 1 + %149 = load %Array*, %Array** %148, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %149, i32 1) + %150 = bitcast { { %Array*, %Array* }*, %Array* }* %140 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %150, i32 1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %151 = add i64 %136, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %121, i32 1) + store i64 0, i64* %startIdx, align 4 + %152 = call %Range @Microsoft__Quantum__Arrays___700b015a14454be98b56de747498937e_IndexRange__body(%Array* %ZData) + %153 = extractvalue %Range %152, 0 + %154 = extractvalue %Range %152, 1 + %155 = extractvalue %Range %152, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__10 + %156 = icmp sgt i64 %154, 0 + br label %header__11 + +header__11: ; preds = %exiting__11, %preheader__1 + %idx = phi i64 [ %153, %preheader__1 ], [ %208, %exiting__11 ] + %157 = icmp sle i64 %idx, %155 + %158 = icmp sge i64 %idx, %155 + %159 = select i1 %156, i1 %157, i1 %158 + br i1 %159, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %160 = load %Array*, %Array** %genIdxes, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %160, i32 -1) + %161 = call %Array* @__quantum__rt__array_copy(%Array* %160, i1 false) + %162 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %idx) + %163 = bitcast i8* %162 to { %Array*, %Array* }** + %164 = load { %Array*, %Array* }*, { %Array*, %Array* }** %163, align 8 + %165 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %166 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 0) 
+ %167 = bitcast i8* %166 to i64* + store i64 0, i64* %167, align 4 + %168 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermToGenIdx__body({ %Array*, %Array* }* %164, %Array* %165) + %169 = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ZTermToPauliGenIdx____body({ { %Array*, %Array* }*, %Array* }* %168) + %170 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %169, i64 0) + %171 = bitcast i8* %170 to { { %Array*, %Array* }*, %Array* }** + %172 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %171, align 8 + %173 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %161, i64 %idx) + %174 = bitcast i8* %173 to { { %Array*, %Array* }*, %Array* }** + %175 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %172, i32 0, i32 0 + %176 = load { %Array*, %Array* }*, { %Array*, %Array* }** %175, align 8 + %177 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %176, i32 0, i32 0 + %178 = load %Array*, %Array** %177, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %178, i32 1) + %179 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %176, i32 0, i32 1 + %180 = load %Array*, %Array** %179, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %180, i32 1) + %181 = bitcast { %Array*, %Array* }* %176 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %181, i32 1) + %182 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %172, i32 0, i32 1 + %183 = load %Array*, %Array** %182, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %183, i32 1) + %184 = bitcast { { %Array*, %Array* }*, %Array* }* %172 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %184, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %178, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %180, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %181, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %183, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %184, i32 1) + %185 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %174, align 8 + %186 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %185, i32 0, i32 0 + %187 = load { %Array*, %Array* }*, { %Array*, %Array* }** %186, align 8 + %188 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %187, i32 0, i32 0 + %189 = load %Array*, %Array** %188, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %189, i32 -1) + %190 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %187, i32 0, i32 1 + %191 = load %Array*, %Array** %190, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %191, i32 -1) + %192 = bitcast { %Array*, %Array* }* %187 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %192, i32 -1) + %193 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %185, i32 0, i32 1 + %194 = load %Array*, %Array** %193, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %194, i32 -1) + %195 = bitcast { { %Array*, %Array* }*, %Array* }* %185 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %195, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %189, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %191, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %192, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %194, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %195, i32 -1) + store { { %Array*, %Array* }*, %Array* }* %172, { { %Array*, %Array* }*, %Array* }** %174, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %161, i32 1) + store %Array* %161, %Array** %genIdxes, align 8 + %196 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %168, i32 0, i32 0 + %197 = load { %Array*, %Array* }*, { %Array*, %Array* }** %196, align 8 + %198 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %197, i32 0, i32 0 + %199 = load %Array*, %Array** %198, align 8 + %200 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %197, i32 0, i32 1 + %201 = load %Array*, %Array** %200, align 8 + %202 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %168, i32 0, i32 1 + %203 = load %Array*, %Array** %202, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %160, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %165, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %199, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %201, i32 -1) + %204 = bitcast { %Array*, %Array* }* %197 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %204, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %203, i32 -1) + %205 = bitcast { { %Array*, %Array* }*, %Array* }* %168 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %205, i32 -1) + %206 = call i64 @__quantum__rt__array_get_size_1d(%Array* %169) + %207 = sub i64 %206, 1 + br label %header__12 + +exiting__11: ; preds = %exit__12 + %208 = add i64 %idx, %154 + br label %header__11 + +exit__11: ; preds = %header__11 + store i64 %1, i64* %startIdx, align 4 + %209 = call %Range @Microsoft__Quantum__Arrays___700b015a14454be98b56de747498937e_IndexRange__body(%Array* %ZZData) + %210 = extractvalue %Range %209, 0 + %211 = extractvalue %Range %209, 1 + %212 = extractvalue %Range %209, 2 + br label %preheader__2 + +header__12: ; preds = %exiting__12, %body__11 + %213 = phi i64 [ 0, %body__11 ], [ %228, %exiting__12 ] + %214 = icmp sle i64 %213, %207 + br i1 %214, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %215 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %169, i64 %213) + %216 = bitcast i8* %215 to { { %Array*, %Array* }*, %Array* }** + %217 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %216, align 8 + %218 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %217, i32 0, i32 0 + %219 = load { %Array*, %Array* }*, { %Array*, %Array* }** %218, align 8 + %220 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %219, i32 0, i32 0 + %221 = load %Array*, %Array** %220, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %221, i32 -1) + %222 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %219, i32 0, i32 1 + %223 = load %Array*, %Array** %222, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %223, i32 -1) + %224 = bitcast { %Array*, %Array* }* %219 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %224, i32 -1) + %225 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %217, i32 0, i32 1 + %226 = load %Array*, %Array** %225, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %226, i32 -1) + %227 = bitcast { { %Array*, %Array* }*, %Array* }* %217 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %227, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %228 = add i64 %213, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_reference_count(%Array* %169, i32 -1) + br label %exiting__11 + +preheader__2: ; preds = %exit__11 + %229 = icmp sgt i64 %211, 0 + br label %header__13 + +header__13: ; preds = %exiting__13, %preheader__2 + %idx__1 = phi i64 [ %210, %preheader__2 ], [ %283, %exiting__13 ] + %230 = icmp sle i64 %idx__1, %212 + %231 = icmp sge i64 %idx__1, %212 + %232 = select i1 %229, i1 %230, i1 %231 + br i1 %232, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %233 = load %Array*, %Array** %genIdxes, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %233, i32 -1) + %234 = call %Array* @__quantum__rt__array_copy(%Array* %233, i1 false) + %235 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %idx__1) + %236 = bitcast i8* %235 to { %Array*, %Array* }** + %237 = load { %Array*, %Array* }*, { %Array*, %Array* }** %236, align 8 + %238 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %239 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %238, i64 0) + %240 = bitcast i8* %239 to i64* + store i64 1, i64* %240, align 4 + %241 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermToGenIdx__body({ %Array*, %Array* }* %237, %Array* %238) + %242 = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ZZTermToPauliGenIdx____body({ { %Array*, %Array* }*, %Array* }* %241) + %243 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %242, i64 0) + %244 = bitcast i8* %243 to { { %Array*, %Array* }*, %Array* }** + %245 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %244, align 8 + %246 = load i64, i64* %startIdx, align 4 + %247 = add i64 %246, %idx__1 + %248 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %234, i64 %247) + %249 = bitcast i8* %248 to { { %Array*, %Array* }*, %Array* }** + %250 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %245, i32 0, i32 0 + %251 = load { %Array*, %Array* }*, { %Array*, %Array* }** %250, align 8 + %252 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %251, i32 0, i32 0 + %253 = load %Array*, %Array** %252, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %253, i32 1) + %254 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %251, i32 0, i32 1 + %255 = load %Array*, %Array** %254, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %255, i32 1) + %256 = bitcast { %Array*, %Array* }* %251 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %256, i32 1) + %257 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %245, i32 0, i32 1 + %258 = load %Array*, %Array** 
%257, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %258, i32 1) + %259 = bitcast { { %Array*, %Array* }*, %Array* }* %245 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %259, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %253, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %255, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %256, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %258, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %259, i32 1) + %260 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %249, align 8 + %261 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %260, i32 0, i32 0 + %262 = load { %Array*, %Array* }*, { %Array*, %Array* }** %261, align 8 + %263 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %262, i32 0, i32 0 + %264 = load %Array*, %Array** %263, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %264, i32 -1) + %265 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %262, i32 0, i32 1 + %266 = load %Array*, %Array** %265, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %266, i32 -1) + %267 = bitcast { %Array*, %Array* }* %262 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %267, i32 -1) + %268 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %260, i32 0, i32 1 + %269 = load %Array*, %Array** %268, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %269, i32 -1) + %270 = bitcast { { %Array*, %Array* }*, %Array* }* %260 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %270, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %264, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %266, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %267, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %269, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %270, i32 -1) + store { { %Array*, %Array* }*, %Array* }* %245, { { %Array*, %Array* }*, %Array* }** %249, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %234, i32 1) + store %Array* %234, %Array** %genIdxes, align 8 + %271 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %241, i32 0, i32 0 + %272 = load { %Array*, %Array* }*, { %Array*, %Array* }** %271, align 8 + %273 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %272, i32 0, i32 0 + %274 = load %Array*, %Array** %273, align 8 + %275 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %272, i32 0, i32 1 + %276 = load %Array*, %Array** %275, align 8 + %277 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %241, i32 0, i32 1 + %278 = load %Array*, %Array** %277, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %233, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %238, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %274, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %276, i32 -1) + %279 = bitcast { %Array*, %Array* }* %272 to %Tuple* + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %279, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %278, i32 -1) + %280 = bitcast { { %Array*, %Array* }*, %Array* }* %241 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %280, i32 -1) + %281 = call i64 @__quantum__rt__array_get_size_1d(%Array* %242) + %282 = sub i64 %281, 1 + br label %header__14 + +exiting__13: ; preds = %exit__14 + %283 = add i64 %idx__1, %211 + br label %header__13 + +exit__13: ; preds = %header__13 + %284 = load i64, i64* %startIdx, align 4 + %285 = add i64 %284, %15 + store i64 %285, i64* %startIdx, align 4 + %286 = call %Range @Microsoft__Quantum__Arrays___700b015a14454be98b56de747498937e_IndexRange__body(%Array* %PQandPQQRData) + %287 = extractvalue %Range %286, 0 + %288 = extractvalue %Range %286, 1 + %289 = extractvalue %Range %286, 2 + br label %preheader__3 + +header__14: ; preds = %exiting__14, %body__13 + %290 = phi i64 [ 0, %body__13 ], [ %305, %exiting__14 ] + %291 = icmp sle i64 %290, %282 + br i1 %291, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %292 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %242, i64 %290) + %293 = bitcast i8* %292 to { { %Array*, %Array* }*, %Array* }** + %294 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %293, align 8 + %295 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %294, i32 0, i32 0 + %296 = load { %Array*, %Array* }*, { %Array*, %Array* }** %295, align 8 + %297 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %296, i32 0, i32 0 + %298 = load %Array*, %Array** %297, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %298, i32 -1) + %299 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %296, i32 0, i32 1 + %300 = load %Array*, %Array** %299, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %300, i32 -1) + %301 = bitcast { %Array*, %Array* }* %296 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %301, i32 -1) + %302 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %294, i32 0, i32 1 + %303 = load %Array*, %Array** %302, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %303, i32 -1) + %304 = bitcast { { %Array*, %Array* }*, %Array* }* %294 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %304, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %305 = add i64 %290, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_reference_count(%Array* %242, i32 -1) + br label %exiting__13 + +preheader__3: ; preds = %exit__13 + %306 = icmp sgt i64 %288, 0 + br label %header__15 + +header__15: ; preds = %exiting__15, %preheader__3 + %idx__2 = phi i64 [ %287, %preheader__3 ], [ %319, %exiting__15 ] + %307 = icmp sle i64 %idx__2, %289 + %308 = icmp sge i64 %idx__2, %289 + %309 = select i1 %306, i1 %307, i1 %308 + br i1 %309, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %310 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %idx__2) + %311 = bitcast i8* %310 to { %Array*, %Array* }** + %312 = load { %Array*, %Array* }*, { %Array*, %Array* }** %311, align 8 + %313 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %314 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %313, i64 0) + %315 = bitcast i8* %314 to i64* + store i64 2, i64* %315, align 4 + %316 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermToGenIdx__body({ %Array*, %Array* }* %312, %Array* %313) + %genArr = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___PQandPQQRTermToPauliGenIdx_____body({ { %Array*, %Array* }*, %Array* }* %316) + %317 = call i64 @__quantum__rt__array_get_size_1d(%Array* %genArr) + %318 = sub i64 %317, 1 + br label %header__16 + +exiting__15: ; preds = %exit__18 + %319 = add i64 %idx__2, %288 + br label %header__15 + +exit__15: ; preds = %header__15 + %320 = load i64, i64* %startIdx, align 4 + %321 = mul i64 2, %29 + %322 = add i64 %320, %321 + store i64 %322, i64* %startIdx, align 4 + store i64 %322, i64* %finalIdx, align 4 + %323 = sub i64 %43, 1 + br label %header__19 + +header__16: ; preds = %exiting__16, %body__15 + %324 = phi i64 [ 0, %body__15 ], [ %339, %exiting__16 ] + %325 = icmp sle i64 %324, %318 + br i1 %325, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %326 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %genArr, i64 %324) + %327 = bitcast i8* %326 to { { %Array*, %Array* }*, %Array* }** + %328 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %327, align 8 + %329 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %328, i32 0, i32 0 + %330 = load { %Array*, %Array* }*, { %Array*, %Array* }** %329, align 8 + %331 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %330, i32 0, i32 0 + %332 = load %Array*, %Array** %331, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %332, i32 1) + %333 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %330, i32 0, i32 1 + %334 = load %Array*, %Array** %333, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %334, i32 1) + %335 = bitcast { %Array*, %Array* }* %330 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %335, i32 1) + %336 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %328, i32 0, i32 1 + %337 = load %Array*, %Array** %336, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %337, i32 1) + %338 = bitcast { { %Array*, %Array* }*, %Array* }* %328 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %338, i32 1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %339 = add i64 %324, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_alias_count(%Array* %genArr, i32 1) + %340 = load %Array*, %Array** %genIdxes, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %340, i32 -1) + %341 = call %Array* @__quantum__rt__array_copy(%Array* %340, i1 false) + %342 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %genArr, i64 0) + %343 = bitcast i8* %342 to { { %Array*, %Array* }*, %Array* }** + %344 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %343, align 8 + %345 = load i64, i64* %startIdx, align 4 + %346 = mul i64 2, %idx__2 + %347 = add i64 %345, %346 + %348 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %341, i64 %347) + %349 = bitcast i8* %348 to { { %Array*, %Array* }*, %Array* }** + %350 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %344, i32 0, 
i32 0 + %351 = load { %Array*, %Array* }*, { %Array*, %Array* }** %350, align 8 + %352 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %351, i32 0, i32 0 + %353 = load %Array*, %Array** %352, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %353, i32 1) + %354 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %351, i32 0, i32 1 + %355 = load %Array*, %Array** %354, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %355, i32 1) + %356 = bitcast { %Array*, %Array* }* %351 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %356, i32 1) + %357 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %344, i32 0, i32 1 + %358 = load %Array*, %Array** %357, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %358, i32 1) + %359 = bitcast { { %Array*, %Array* }*, %Array* }* %344 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %359, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %353, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %355, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %356, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %358, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %359, i32 1) + %360 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %349, align 8 + %361 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %360, i32 0, i32 0 + %362 = load { %Array*, %Array* }*, { %Array*, %Array* }** %361, align 8 + %363 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %362, i32 0, i32 0 + %364 = load %Array*, %Array** %363, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %364, i32 -1) + %365 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %362, i32 0, i32 1 + %366 = load %Array*, %Array** %365, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %366, i32 -1) + %367 = bitcast { %Array*, %Array* }* %362 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %367, i32 -1) + %368 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %360, i32 0, i32 1 + %369 = load %Array*, %Array** %368, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %369, i32 -1) + %370 = bitcast { { %Array*, %Array* }*, %Array* }* %360 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %370, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %364, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %366, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %367, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %369, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %370, i32 -1) + store { { %Array*, %Array* }*, %Array* }* %344, { { %Array*, %Array* }*, %Array* }** %349, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %341, i32 1) + store %Array* %341, %Array** %genIdxes, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %341, i32 -1) + %371 = call %Array* @__quantum__rt__array_copy(%Array* %341, i1 false) + %372 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %genArr, i64 1) + %373 = bitcast i8* %372 to { { 
%Array*, %Array* }*, %Array* }** + %374 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %373, align 8 + %375 = mul i64 2, %idx__2 + %376 = add i64 %345, %375 + %377 = add i64 %376, 1 + %378 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %371, i64 %377) + %379 = bitcast i8* %378 to { { %Array*, %Array* }*, %Array* }** + %380 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %374, i32 0, i32 0 + %381 = load { %Array*, %Array* }*, { %Array*, %Array* }** %380, align 8 + %382 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %381, i32 0, i32 0 + %383 = load %Array*, %Array** %382, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %383, i32 1) + %384 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %381, i32 0, i32 1 + %385 = load %Array*, %Array** %384, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %385, i32 1) + %386 = bitcast { %Array*, %Array* }* %381 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %386, i32 1) + %387 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %374, i32 0, i32 1 + %388 = load %Array*, %Array** %387, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %388, i32 1) + %389 = bitcast { { %Array*, %Array* }*, %Array* }* %374 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %389, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %383, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %385, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %386, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %388, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %389, i32 1) + %390 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %379, align 8 + %391 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %390, i32 0, i32 0 + %392 = load { %Array*, %Array* }*, { %Array*, %Array* }** %391, align 8 + %393 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %392, i32 0, i32 0 + %394 = load %Array*, %Array** %393, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %394, i32 -1) + %395 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %392, i32 0, i32 1 + %396 = load %Array*, %Array** %395, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %396, i32 -1) + %397 = bitcast { %Array*, %Array* }* %392 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %397, i32 -1) + %398 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %390, i32 0, i32 1 + %399 = load %Array*, %Array** %398, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %399, i32 -1) + %400 = bitcast { { %Array*, %Array* }*, %Array* }* %390 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %400, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %394, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %396, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %397, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %399, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %400, i32 -1) + store { { 
%Array*, %Array* }*, %Array* }* %374, { { %Array*, %Array* }*, %Array* }** %379, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %371, i32 1) + store %Array* %371, %Array** %genIdxes, align 8 + %401 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %316, i32 0, i32 0 + %402 = load { %Array*, %Array* }*, { %Array*, %Array* }** %401, align 8 + %403 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %402, i32 0, i32 0 + %404 = load %Array*, %Array** %403, align 8 + %405 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %402, i32 0, i32 1 + %406 = load %Array*, %Array** %405, align 8 + %407 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %316, i32 0, i32 1 + %408 = load %Array*, %Array** %407, align 8 + %409 = sub i64 %317, 1 + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %410 = phi i64 [ 0, %exit__16 ], [ %425, %exiting__17 ] + %411 = icmp sle i64 %410, %409 + br i1 %411, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %412 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %genArr, i64 %410) + %413 = bitcast i8* %412 to { { %Array*, %Array* }*, %Array* }** + %414 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %413, align 8 + %415 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %414, i32 0, i32 0 + %416 = load { %Array*, %Array* }*, { %Array*, %Array* }** %415, align 8 + %417 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %416, i32 0, i32 0 + %418 = load %Array*, %Array** %417, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %418, i32 -1) + %419 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %416, i32 0, i32 1 + %420 = load %Array*, %Array** %419, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %420, i32 -1) + %421 = bitcast { %Array*, %Array* }* %416 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %421, i32 -1) + %422 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %414, i32 0, i32 1 + %423 = load %Array*, %Array** %422, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %423, i32 -1) + %424 = bitcast { { %Array*, %Array* }*, %Array* }* %414 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %424, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %425 = add i64 %410, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %genArr, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %313, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %404, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %406, i32 -1) + %426 = bitcast { %Array*, %Array* }* %402 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %426, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %408, i32 -1) + %427 = bitcast { { %Array*, %Array* }*, %Array* }* %316 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %427, i32 -1) + %428 = sub i64 %317, 1 + br label %header__18 + +header__18: ; preds = %exiting__18, %exit__17 + %429 = phi i64 [ 0, %exit__17 ], [ %444, %exiting__18 ] + %430 = icmp sle i64 %429, %428 + br i1 %430, 
label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %431 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %genArr, i64 %429) + %432 = bitcast i8* %431 to { { %Array*, %Array* }*, %Array* }** + %433 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %432, align 8 + %434 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %433, i32 0, i32 0 + %435 = load { %Array*, %Array* }*, { %Array*, %Array* }** %434, align 8 + %436 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %435, i32 0, i32 0 + %437 = load %Array*, %Array** %436, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %437, i32 -1) + %438 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %435, i32 0, i32 1 + %439 = load %Array*, %Array** %438, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %439, i32 -1) + %440 = bitcast { %Array*, %Array* }* %435 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %440, i32 -1) + %441 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %433, i32 0, i32 1 + %442 = load %Array*, %Array** %441, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %442, i32 -1) + %443 = bitcast { { %Array*, %Array* }*, %Array* }* %433 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %443, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = %body__18 + %444 = add i64 %429, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_reference_count(%Array* %genArr, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %340, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %341, i32 -1) + br label %exiting__15 + +header__19: ; preds = %exiting__19, %exit__15 + %idx__3 = phi i64 [ 0, %exit__15 ], [ %455, %exiting__19 ] + %445 = icmp sle i64 %idx__3, %323 + br i1 %445, label %body__19, label %exit__19 + +body__19: ; preds = %header__19 + %446 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %idx__3) + %447 = bitcast i8* %446 to { %Array*, %Array* }** + %448 = load { %Array*, %Array* }*, { %Array*, %Array* }** %447, align 8 + %449 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %450 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %449, i64 0) + %451 = bitcast i8* %450 to i64* + store i64 3, i64* %451, align 4 + %452 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermToGenIdx__body({ %Array*, %Array* }* %448, %Array* %449) + %genArr__1 = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___V0123TermToPauliGenIdx_____body({ { %Array*, %Array* }*, %Array* }* %452) + %453 = call i64 @__quantum__rt__array_get_size_1d(%Array* %genArr__1) + %454 = sub i64 %453, 1 + br label %header__20 + +exiting__19: ; preds = %exit__23 + %455 = add i64 %idx__3, 1 + br label %header__19 + +exit__19: ; preds = %header__19 + %456 = load i64, i64* %finalIdx, align 4 + %457 = load %Array*, %Array** %genIdxes, align 8 + %458 = sub i64 %456, 1 + %459 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %458, 2 + %460 = call %Array* @__quantum__rt__array_slice_1d(%Array* %457, %Range %459, i1 true) + %461 = call %Callable* @Microsoft__Quantum__Arrays___afda34fbc524426087f3337291e18d1f_LookupFunction__body(%Array* %460) + %462 = call { i64, %Callable* }* 
@Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 %456, %Callable* %461) + %463 = load { %Array*, %Array* }*, { %Array*, %Array* }** %114, align 8 + %464 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %463, i32 0, i32 0 + %465 = load %Array*, %Array** %464, align 8 + %466 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %463, i32 0, i32 1 + %467 = load %Array*, %Array** %466, align 8 + %468 = load %Array*, %Array** %115, align 8 + %469 = sub i64 %1, 1 + br label %header__24 + +header__20: ; preds = %exiting__20, %body__19 + %470 = phi i64 [ 0, %body__19 ], [ %485, %exiting__20 ] + %471 = icmp sle i64 %470, %454 + br i1 %471, label %body__20, label %exit__20 + +body__20: ; preds = %header__20 + %472 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %genArr__1, i64 %470) + %473 = bitcast i8* %472 to { { %Array*, %Array* }*, %Array* }** + %474 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %473, align 8 + %475 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %474, i32 0, i32 0 + %476 = load { %Array*, %Array* }*, { %Array*, %Array* }** %475, align 8 + %477 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %476, i32 0, i32 0 + %478 = load %Array*, %Array** %477, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %478, i32 1) + %479 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %476, i32 0, i32 1 + %480 = load %Array*, %Array** %479, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %480, i32 1) + %481 = bitcast { %Array*, %Array* }* %476 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %481, i32 1) + %482 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %474, i32 0, i32 1 + %483 = load %Array*, %Array** %482, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %483, i32 1) + %484 = bitcast { { %Array*, %Array* }*, %Array* }* %474 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %484, i32 1) + br label %exiting__20 + +exiting__20: ; preds = %body__20 + %485 = add i64 %470, 1 + br label %header__20 + +exit__20: ; preds = %header__20 + call void @__quantum__rt__array_update_alias_count(%Array* %genArr__1, i32 1) + %486 = call %Range @Microsoft__Quantum__Arrays___1282ba485eb84ebd9e61ed357fc1aebb_IndexRange__body(%Array* %genArr__1) + %487 = extractvalue %Range %486, 0 + %488 = extractvalue %Range %486, 1 + %489 = extractvalue %Range %486, 2 + br label %preheader__4 + +preheader__4: ; preds = %exit__20 + %490 = icmp sgt i64 %488, 0 + br label %header__21 + +header__21: ; preds = %exiting__21, %preheader__4 + %idx0123 = phi i64 [ %487, %preheader__4 ], [ %524, %exiting__21 ] + %491 = icmp sle i64 %idx0123, %489 + %492 = icmp sge i64 %idx0123, %489 + %493 = select i1 %490, i1 %491, i1 %492 + br i1 %493, label %body__21, label %exit__21 + +body__21: ; preds = %header__21 + %494 = load %Array*, %Array** %genIdxes, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %494, i32 -1) + %495 = call %Array* @__quantum__rt__array_copy(%Array* %494, i1 false) + %496 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %genArr__1, i64 %idx0123) + %497 = bitcast i8* %496 to { { %Array*, %Array* }*, %Array* }** + %498 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %497, align 8 + %499 = load i64, i64* %finalIdx, align 4 + %500 = 
call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %495, i64 %499) + %501 = bitcast i8* %500 to { { %Array*, %Array* }*, %Array* }** + %502 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %498, i32 0, i32 0 + %503 = load { %Array*, %Array* }*, { %Array*, %Array* }** %502, align 8 + %504 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %503, i32 0, i32 0 + %505 = load %Array*, %Array** %504, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %505, i32 1) + %506 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %503, i32 0, i32 1 + %507 = load %Array*, %Array** %506, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %507, i32 1) + %508 = bitcast { %Array*, %Array* }* %503 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %508, i32 1) + %509 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %498, i32 0, i32 1 + %510 = load %Array*, %Array** %509, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %510, i32 1) + %511 = bitcast { { %Array*, %Array* }*, %Array* }* %498 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %511, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %505, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %507, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %508, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %510, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %511, i32 1) + %512 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %501, align 8 + %513 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %512, i32 0, i32 0 + %514 = load { %Array*, %Array* }*, { %Array*, %Array* }** %513, align 8 + %515 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %514, i32 0, i32 0 + %516 = load %Array*, %Array** %515, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %516, i32 -1) + %517 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %514, i32 0, i32 1 + %518 = load %Array*, %Array** %517, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %518, i32 -1) + %519 = bitcast { %Array*, %Array* }* %514 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %519, i32 -1) + %520 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %512, i32 0, i32 1 + %521 = load %Array*, %Array** %520, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %521, i32 -1) + %522 = bitcast { { %Array*, %Array* }*, %Array* }* %512 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %522, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %516, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %518, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %519, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %521, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %522, i32 -1) + store { { %Array*, %Array* }*, %Array* }* %498, { { %Array*, %Array* }*, %Array* }** %501, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %495, i32 1) + store %Array* %495, %Array** %genIdxes, align 8 + %523 = add 
i64 %499, 1 + store i64 %523, i64* %finalIdx, align 4 + call void @__quantum__rt__array_update_reference_count(%Array* %494, i32 -1) + br label %exiting__21 + +exiting__21: ; preds = %body__21 + %524 = add i64 %idx0123, %488 + br label %header__21 + +exit__21: ; preds = %header__21 + %525 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %452, i32 0, i32 0 + %526 = load { %Array*, %Array* }*, { %Array*, %Array* }** %525, align 8 + %527 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %526, i32 0, i32 0 + %528 = load %Array*, %Array** %527, align 8 + %529 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %526, i32 0, i32 1 + %530 = load %Array*, %Array** %529, align 8 + %531 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %452, i32 0, i32 1 + %532 = load %Array*, %Array** %531, align 8 + %533 = sub i64 %453, 1 + br label %header__22 + +header__22: ; preds = %exiting__22, %exit__21 + %534 = phi i64 [ 0, %exit__21 ], [ %549, %exiting__22 ] + %535 = icmp sle i64 %534, %533 + br i1 %535, label %body__22, label %exit__22 + +body__22: ; preds = %header__22 + %536 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %genArr__1, i64 %534) + %537 = bitcast i8* %536 to { { %Array*, %Array* }*, %Array* }** + %538 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %537, align 8 + %539 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %538, i32 0, i32 0 + %540 = load { %Array*, %Array* }*, { %Array*, %Array* }** %539, align 8 + %541 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %540, i32 0, i32 0 + %542 = load %Array*, %Array** %541, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %542, i32 -1) + %543 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %540, i32 0, i32 1 + %544 = load %Array*, %Array** %543, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %544, i32 -1) + %545 = bitcast { %Array*, %Array* }* %540 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %545, i32 -1) + %546 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %538, i32 0, i32 1 + %547 = load %Array*, %Array** %546, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %547, i32 -1) + %548 = bitcast { { %Array*, %Array* }*, %Array* }* %538 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %548, i32 -1) + br label %exiting__22 + +exiting__22: ; preds = %body__22 + %549 = add i64 %534, 1 + br label %header__22 + +exit__22: ; preds = %header__22 + call void @__quantum__rt__array_update_alias_count(%Array* %genArr__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %449, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %528, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %530, i32 -1) + %550 = bitcast { %Array*, %Array* }* %526 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %550, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %532, i32 -1) + %551 = bitcast { { %Array*, %Array* }*, %Array* }* %452 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %551, i32 -1) + %552 = sub i64 %453, 1 + br label %header__23 + +header__23: ; preds = %exiting__23, %exit__22 + %553 = phi i64 [ 0, 
%exit__22 ], [ %568, %exiting__23 ] + %554 = icmp sle i64 %553, %552 + br i1 %554, label %body__23, label %exit__23 + +body__23: ; preds = %header__23 + %555 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %genArr__1, i64 %553) + %556 = bitcast i8* %555 to { { %Array*, %Array* }*, %Array* }** + %557 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %556, align 8 + %558 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %557, i32 0, i32 0 + %559 = load { %Array*, %Array* }*, { %Array*, %Array* }** %558, align 8 + %560 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %559, i32 0, i32 0 + %561 = load %Array*, %Array** %560, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %561, i32 -1) + %562 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %559, i32 0, i32 1 + %563 = load %Array*, %Array** %562, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %563, i32 -1) + %564 = bitcast { %Array*, %Array* }* %559 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %564, i32 -1) + %565 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %557, i32 0, i32 1 + %566 = load %Array*, %Array** %565, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %566, i32 -1) + %567 = bitcast { { %Array*, %Array* }*, %Array* }* %557 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %567, i32 -1) + br label %exiting__23 + +exiting__23: ; preds = %body__23 + %568 = add i64 %553, 1 + br label %header__23 + +exit__23: ; preds = %header__23 + call void @__quantum__rt__array_update_reference_count(%Array* %genArr__1, i32 -1) + br label %exiting__19 + +header__24: ; preds = %exiting__24, %exit__19 + %569 = phi i64 [ 0, %exit__19 ], [ %579, %exiting__24 ] + %570 = icmp sle i64 %569, %469 + br i1 %570, label %body__24, label %exit__24 + +body__24: ; preds = %header__24 + %571 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %569) + %572 = bitcast i8* %571 to { %Array*, %Array* }** + %573 = load { %Array*, %Array* }*, { %Array*, %Array* }** %572, align 8 + %574 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %573, i32 0, i32 0 + %575 = load %Array*, %Array** %574, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %575, i32 -1) + %576 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %573, i32 0, i32 1 + %577 = load %Array*, %Array** %576, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %577, i32 -1) + %578 = bitcast { %Array*, %Array* }* %573 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %578, i32 -1) + br label %exiting__24 + +exiting__24: ; preds = %body__24 + %579 = add i64 %569, 1 + br label %header__24 + +exit__24: ; preds = %header__24 + call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 -1) + %580 = sub i64 %15, 1 + br label %header__25 + +header__25: ; preds = %exiting__25, %exit__24 + %581 = phi i64 [ 0, %exit__24 ], [ %591, %exiting__25 ] + %582 = icmp sle i64 %581, %580 + br i1 %582, label %body__25, label %exit__25 + +body__25: ; preds = %header__25 + %583 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %581) + %584 = bitcast i8* %583 to { %Array*, %Array* }** + %585 = load { %Array*, %Array* }*, { %Array*, %Array* }** %584, align 8 + %586 = getelementptr inbounds 
{ %Array*, %Array* }, { %Array*, %Array* }* %585, i32 0, i32 0 + %587 = load %Array*, %Array** %586, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %587, i32 -1) + %588 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %585, i32 0, i32 1 + %589 = load %Array*, %Array** %588, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %589, i32 -1) + %590 = bitcast { %Array*, %Array* }* %585 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %590, i32 -1) + br label %exiting__25 + +exiting__25: ; preds = %body__25 + %591 = add i64 %581, 1 + br label %header__25 + +exit__25: ; preds = %header__25 + call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 -1) + %592 = sub i64 %29, 1 + br label %header__26 + +header__26: ; preds = %exiting__26, %exit__25 + %593 = phi i64 [ 0, %exit__25 ], [ %603, %exiting__26 ] + %594 = icmp sle i64 %593, %592 + br i1 %594, label %body__26, label %exit__26 + +body__26: ; preds = %header__26 + %595 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %593) + %596 = bitcast i8* %595 to { %Array*, %Array* }** + %597 = load { %Array*, %Array* }*, { %Array*, %Array* }** %596, align 8 + %598 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %597, i32 0, i32 0 + %599 = load %Array*, %Array** %598, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %599, i32 -1) + %600 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %597, i32 0, i32 1 + %601 = load %Array*, %Array** %600, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %601, i32 -1) + %602 = bitcast { %Array*, %Array* }* %597 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %602, i32 -1) + br label %exiting__26 + +exiting__26: ; preds = %body__26 + %603 = add i64 %593, 1 + br label %header__26 + +exit__26: ; preds = %header__26 + call void @__quantum__rt__array_update_alias_count(%Array* %PQandPQQRData, i32 -1) + %604 = sub i64 %43, 1 + br label %header__27 + +header__27: ; preds = %exiting__27, %exit__26 + %605 = phi i64 [ 0, %exit__26 ], [ %615, %exiting__27 ] + %606 = icmp sle i64 %605, %604 + br i1 %606, label %body__27, label %exit__27 + +body__27: ; preds = %header__27 + %607 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %605) + %608 = bitcast i8* %607 to { %Array*, %Array* }** + %609 = load { %Array*, %Array* }*, { %Array*, %Array* }** %608, align 8 + %610 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %609, i32 0, i32 0 + %611 = load %Array*, %Array** %610, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %611, i32 -1) + %612 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %609, i32 0, i32 1 + %613 = load %Array*, %Array** %612, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %613, i32 -1) + %614 = bitcast { %Array*, %Array* }* %609 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %614, i32 -1) + br label %exiting__27 + +exiting__27: ; preds = %body__27 + %615 = add i64 %605, 1 + br label %header__27 + +exit__27: ; preds = %header__27 + call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 -1) + %616 = sub i64 %1, 1 + br label %header__28 + +header__28: ; preds = %exiting__28, %exit__27 + %617 = phi i64 [ 0, %exit__27 ], [ %627, %exiting__28 ] + %618 = icmp sle i64 %617, 
%616 + br i1 %618, label %body__28, label %exit__28 + +body__28: ; preds = %header__28 + %619 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %617) + %620 = bitcast i8* %619 to { %Array*, %Array* }** + %621 = load { %Array*, %Array* }*, { %Array*, %Array* }** %620, align 8 + %622 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %621, i32 0, i32 0 + %623 = load %Array*, %Array** %622, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %623, i32 -1) + %624 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %621, i32 0, i32 1 + %625 = load %Array*, %Array** %624, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %625, i32 -1) + %626 = bitcast { %Array*, %Array* }* %621 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %626, i32 -1) + br label %exiting__28 + +exiting__28: ; preds = %body__28 + %627 = add i64 %617, 1 + br label %header__28 + +exit__28: ; preds = %header__28 + call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 -1) + %628 = sub i64 %15, 1 + br label %header__29 + +header__29: ; preds = %exiting__29, %exit__28 + %629 = phi i64 [ 0, %exit__28 ], [ %639, %exiting__29 ] + %630 = icmp sle i64 %629, %628 + br i1 %630, label %body__29, label %exit__29 + +body__29: ; preds = %header__29 + %631 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %629) + %632 = bitcast i8* %631 to { %Array*, %Array* }** + %633 = load { %Array*, %Array* }*, { %Array*, %Array* }** %632, align 8 + %634 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %633, i32 0, i32 0 + %635 = load %Array*, %Array** %634, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %635, i32 -1) + %636 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %633, i32 0, i32 1 + %637 = load %Array*, %Array** %636, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %637, i32 -1) + %638 = bitcast { %Array*, %Array* }* %633 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %638, i32 -1) + br label %exiting__29 + +exiting__29: ; preds = %body__29 + %639 = add i64 %629, 1 + br label %header__29 + +exit__29: ; preds = %header__29 + call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 -1) + %640 = sub i64 %29, 1 + br label %header__30 + +header__30: ; preds = %exiting__30, %exit__29 + %641 = phi i64 [ 0, %exit__29 ], [ %651, %exiting__30 ] + %642 = icmp sle i64 %641, %640 + br i1 %642, label %body__30, label %exit__30 + +body__30: ; preds = %header__30 + %643 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %641) + %644 = bitcast i8* %643 to { %Array*, %Array* }** + %645 = load { %Array*, %Array* }*, { %Array*, %Array* }** %644, align 8 + %646 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %645, i32 0, i32 0 + %647 = load %Array*, %Array** %646, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %647, i32 -1) + %648 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %645, i32 0, i32 1 + %649 = load %Array*, %Array** %648, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %649, i32 -1) + %650 = bitcast { %Array*, %Array* }* %645 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %650, i32 -1) + br label %exiting__30 + +exiting__30: ; preds = %body__30 + %651 = add i64 %641, 1 + br label %header__30 + +exit__30: ; preds = %header__30 + call void 
@__quantum__rt__array_update_alias_count(%Array* %PQandPQQRData, i32 -1) + %652 = sub i64 %43, 1 + br label %header__31 + +header__31: ; preds = %exiting__31, %exit__30 + %653 = phi i64 [ 0, %exit__30 ], [ %663, %exiting__31 ] + %654 = icmp sle i64 %653, %652 + br i1 %654, label %body__31, label %exit__31 + +body__31: ; preds = %header__31 + %655 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %653) + %656 = bitcast i8* %655 to { %Array*, %Array* }** + %657 = load { %Array*, %Array* }*, { %Array*, %Array* }** %656, align 8 + %658 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %657, i32 0, i32 0 + %659 = load %Array*, %Array** %658, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %659, i32 -1) + %660 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %657, i32 0, i32 1 + %661 = load %Array*, %Array** %660, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %661, i32 -1) + %662 = bitcast { %Array*, %Array* }* %657 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %662, i32 -1) + br label %exiting__31 + +exiting__31: ; preds = %body__31 + %663 = add i64 %653, 1 + br label %header__31 + +exit__31: ; preds = %header__31 + call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 -1) + %664 = call i64 @__quantum__rt__array_get_size_1d(%Array* %457) + %665 = sub i64 %664, 1 + br label %header__32 + +header__32: ; preds = %exiting__32, %exit__31 + %666 = phi i64 [ 0, %exit__31 ], [ %681, %exiting__32 ] + %667 = icmp sle i64 %666, %665 + br i1 %667, label %body__32, label %exit__32 + +body__32: ; preds = %header__32 + %668 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %457, i64 %666) + %669 = bitcast i8* %668 to { { %Array*, %Array* }*, %Array* }** + %670 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %669, align 8 + %671 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %670, i32 0, i32 0 + %672 = load { %Array*, %Array* }*, { %Array*, %Array* }** %671, align 8 + %673 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %672, i32 0, i32 0 + %674 = load %Array*, %Array** %673, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %674, i32 -1) + %675 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %672, i32 0, i32 1 + %676 = load %Array*, %Array** %675, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %676, i32 -1) + %677 = bitcast { %Array*, %Array* }* %672 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %677, i32 -1) + %678 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %670, i32 0, i32 1 + %679 = load %Array*, %Array** %678, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %679, i32 -1) + %680 = bitcast { { %Array*, %Array* }*, %Array* }* %670 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %680, i32 -1) + br label %exiting__32 + +exiting__32: ; preds = %body__32 + %681 = add i64 %666, 1 + br label %header__32 + +exit__32: ; preds = %header__32 + call void @__quantum__rt__array_update_alias_count(%Array* %457, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %465, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %467, i32 -1) + %682 = bitcast { %Array*, %Array* }* %463 to %Tuple* + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %682, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %468, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %112, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %460, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %461, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %461, i32 -1) + %683 = sub i64 %664, 1 + br label %header__33 + +header__33: ; preds = %exiting__33, %exit__32 + %684 = phi i64 [ 0, %exit__32 ], [ %699, %exiting__33 ] + %685 = icmp sle i64 %684, %683 + br i1 %685, label %body__33, label %exit__33 + +body__33: ; preds = %header__33 + %686 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %457, i64 %684) + %687 = bitcast i8* %686 to { { %Array*, %Array* }*, %Array* }** + %688 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %687, align 8 + %689 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %688, i32 0, i32 0 + %690 = load { %Array*, %Array* }*, { %Array*, %Array* }** %689, align 8 + %691 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %690, i32 0, i32 0 + %692 = load %Array*, %Array** %691, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %692, i32 -1) + %693 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %690, i32 0, i32 1 + %694 = load %Array*, %Array** %693, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %694, i32 -1) + %695 = bitcast { %Array*, %Array* }* %690 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %695, i32 -1) + %696 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %688, i32 0, i32 1 + %697 = load %Array*, %Array** %696, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %697, i32 -1) + %698 = bitcast { { %Array*, %Array* }*, %Array* }* %688 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %698, i32 -1) + br label %exiting__33 + +exiting__33: ; preds = %body__33 + %699 = add i64 %684, 1 + br label %header__33 + +exit__33: ; preds = %header__33 + call void @__quantum__rt__array_update_reference_count(%Array* %457, i32 -1) + ret { i64, %Callable* }* %462 +} + +define internal { double, { { %Callable* }* }* }* @Microsoft__Quantum__Simulation__PauliBlockEncoding__body({ i64, %Callable* }* %generatorSystem) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %statePrepUnitary = call %Callable* @Microsoft__Quantum__Canon___6a714b914d4d48de8a4ad7810cc0d5d1_CurriedOpCA__body(%Callable* %3) + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePrepUnitary, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePrepUnitary, i32 1) 
+ %multiplexer = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__MultiplexerFromGenerator__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %multiplexer, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %multiplexer, i32 1) + %4 = call { double, { { %Callable* }* }* }* @Microsoft__Quantum__Simulation____QsRef3___PauliBlockEncoding____body({ i64, %Callable* }* %generatorSystem, %Callable* %statePrepUnitary, %Callable* %multiplexer) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePrepUnitary, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePrepUnitary, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %multiplexer, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %multiplexer, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %statePrepUnitary, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %statePrepUnitary, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %multiplexer, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %multiplexer, i32 -1) + ret { double, { { %Callable* }* }* }* %4 +} + +define internal i64 @Microsoft__Quantum__Math__Ceiling__body(double %value) { +entry: + %0 = call { i64, double, i1 }* @Microsoft__Quantum__Math____QsRef2__ExtendedTruncation____body(double %value) + %1 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %0, i32 0, i32 0 + %truncated = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %0, i32 0, i32 1 + %remainder = load double, double* %2, align 8 + %3 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %0, i32 0, i32 2 + %isPositive = load i1, i1* %3, align 1 + %4 = call double @Microsoft__Quantum__Math__AbsD__body(double %remainder) + %5 = fcmp ole double %4, 1.000000e-15 + br i1 %5, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %6 = bitcast { i64, double, i1 }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret i64 %truncated + +else__1: ; preds = %entry + br i1 %isPositive, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %7 = add i64 %truncated, 1 + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %8 = phi i64 [ %7, %condTrue__1 ], [ %truncated, %condFalse__1 ] + %9 = bitcast { i64, double, i1 }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret i64 %8 + +continue__1: ; No predecessors! 
+ unreachable +} + +define internal double @Microsoft__Quantum__Math__Lg__body(double %input) { +entry: + %0 = call double @__quantum__qis__log__body(double %input) + %1 = call double @Microsoft__Quantum__Math__LogOf2__body() + %2 = fdiv double %0, %1 + ret double %2 +} + +define internal %Callable* @Microsoft__Quantum__Simulation__QuantumWalkByQubitization__body({ { %Callable* }* }* %blockEncoding) { +entry: + %0 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %blockEncoding, i32 0, i32 0 + %1 = load { %Callable* }*, { %Callable* }** %0, align 8 + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + %3 = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 1) + %4 = bitcast { %Callable* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = bitcast { { %Callable* }* }* %blockEncoding to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation____QsRef3__ApplyQuantumWalkByQubitization____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { { %Callable* }* }* }* getelementptr ({ %Callable*, { { %Callable* }* }* }, { %Callable*, { { %Callable* }* }* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Callable*, { { %Callable* }* }* }* + %9 = getelementptr inbounds { %Callable*, { { %Callable* }* }* }, { %Callable*, { { %Callable* }* }* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Callable*, { { %Callable* }* }* }, { %Callable*, { { %Callable* }* }* }* %8, i32 0, i32 1 + store %Callable* %6, %Callable** %9, align 8 + store { { %Callable* }* }* %blockEncoding, { { %Callable* }* }** %10, align 8 + %11 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__58__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__34__FunctionTable, %Tuple* %7) + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + ret %Callable* %11 +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___V0123TermToPauliGenIdx_____body({ { %Array*, %Array* }*, %Array* }* %term) { +entry: + %nonZero = alloca i64, align 8 + %genIdxes = alloca %Array*, align 8 + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, 
%Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %v0123 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %v0123, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %v0123, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %8 = bitcast i8* %7 to i64* + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %10 = bitcast i8* %9 to i64* + %11 = load i64, i64* %8, align 4 + %12 = load i64, i64* %10, align 4 + %qubitsPQ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubitsPQ, i64 0) + %14 = bitcast i8* %13 to i64* + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubitsPQ, i64 1) + %16 = bitcast i8* %15 to i64* + store i64 %11, i64* %14, align 4 + store i64 %12, i64* %16, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsPQ, i32 1) + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 2) + %18 = bitcast i8* %17 to i64* + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %20 = bitcast i8* %19 to i64* + %21 = load i64, i64* %18, align 4 + %22 = load i64, i64* %20, align 4 + %qubitsRS = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubitsRS, i64 0) + %24 = bitcast i8* %23 to i64* + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubitsRS, i64 1) + %26 = bitcast i8* %25 to i64* + store i64 %21, i64* %24, align 4 + store i64 %22, i64* %26, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsRS, i32 1) + %27 = add i64 %11, 1 + %28 = sub i64 %12, 1 + %29 = insertvalue %Range zeroinitializer, i64 %27, 0 + %30 = insertvalue %Range %29, i64 1, 1 + %31 = insertvalue %Range %30, i64 %28, 2 + %qubitsPQJW = call %Array* @Microsoft__Quantum__Convert__RangeAsIntArray__body(%Range %31) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsPQJW, i32 1) + %32 = add i64 %21, 1 + %33 = sub i64 %22, 1 + %34 = insertvalue %Range zeroinitializer, i64 %32, 0 + %35 = insertvalue %Range %34, i64 1, 1 + %36 = insertvalue %Range %35, i64 %33, 2 + %qubitsRSJW = call %Array* @Microsoft__Quantum__Convert__RangeAsIntArray__body(%Range %36) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsRSJW, i32 1) + %37 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 0) + %39 = bitcast i8* %38 to i64* + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 1) + %41 = bitcast i8* %40 to i64* + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 2) + %43 = bitcast 
i8* %42 to i64* + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 3) + %45 = bitcast i8* %44 to i64* + store i64 1, i64* %39, align 4 + store i64 1, i64* %41, align 4 + store i64 1, i64* %43, align 4 + store i64 1, i64* %45, align 4 + %46 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %46, i64 0) + %48 = bitcast i8* %47 to i64* + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %46, i64 1) + %50 = bitcast i8* %49 to i64* + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %46, i64 2) + %52 = bitcast i8* %51 to i64* + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %46, i64 3) + %54 = bitcast i8* %53 to i64* + store i64 1, i64* %48, align 4 + store i64 1, i64* %50, align 4 + store i64 2, i64* %52, align 4 + store i64 2, i64* %54, align 4 + %55 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 0) + %57 = bitcast i8* %56 to i64* + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 1) + %59 = bitcast i8* %58 to i64* + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 2) + %61 = bitcast i8* %60 to i64* + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 3) + %63 = bitcast i8* %62 to i64* + store i64 1, i64* %57, align 4 + store i64 2, i64* %59, align 4 + store i64 1, i64* %61, align 4 + store i64 2, i64* %63, align 4 + %64 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 0) + %66 = bitcast i8* %65 to i64* + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 1) + %68 = bitcast i8* %67 to i64* + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 2) + %70 = bitcast i8* %69 to i64* + %71 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 3) + %72 = bitcast i8* %71 to i64* + store i64 2, i64* %66, align 4 + store i64 1, i64* %68, align 4 + store i64 1, i64* %70, align 4 + store i64 2, i64* %72, align 4 + %73 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 0) + %75 = bitcast i8* %74 to i64* + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 1) + %77 = bitcast i8* %76 to i64* + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 2) + %79 = bitcast i8* %78 to i64* + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 3) + %81 = bitcast i8* %80 to i64* + store i64 2, i64* %75, align 4 + store i64 2, i64* %77, align 4 + store i64 2, i64* %79, align 4 + store i64 2, i64* %81, align 4 + %82 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %82, i64 0) + %84 = bitcast i8* %83 to i64* + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %82, i64 1) + %86 = bitcast i8* %85 to i64* + %87 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %82, i64 2) + %88 = bitcast i8* %87 to i64* + %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %82, i64 3) + %90 = bitcast i8* %89 to i64* + store i64 2, i64* %84, align 4 + store i64 2, i64* %86, align 4 + store i64 1, i64* %88, align 4 + store i64 1, i64* %90, align 4 + %91 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %92 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %91, i64 0) + %93 = bitcast i8* %92 to i64* + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %91, i64 1) + %95 = bitcast i8* %94 to i64* + %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %91, i64 2) + %97 = bitcast i8* %96 to i64* + %98 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %91, i64 3) + %99 = bitcast i8* %98 to i64* + store i64 2, i64* %93, align 4 + store i64 1, i64* %95, align 4 + store i64 2, i64* %97, align 4 + store i64 1, i64* %99, align 4 + %100 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %100, i64 0) + %102 = bitcast i8* %101 to i64* + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %100, i64 1) + %104 = bitcast i8* %103 to i64* + %105 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %100, i64 2) + %106 = bitcast i8* %105 to i64* + %107 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %100, i64 3) + %108 = bitcast i8* %107 to i64* + store i64 1, i64* %102, align 4 + store i64 2, i64* %104, align 4 + store i64 2, i64* %106, align 4 + store i64 1, i64* %108, align 4 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %110 = bitcast i8* %109 to %Array** + %111 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %112 = bitcast i8* %111 to %Array** + %113 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 2) + %114 = bitcast i8* %113 to %Array** + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 3) + %116 = bitcast i8* %115 to %Array** + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 4) + %118 = bitcast i8* %117 to %Array** + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 5) + %120 = bitcast i8* %119 to %Array** + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 6) + %122 = bitcast i8* %121 to %Array** + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 7) + %124 = bitcast i8* %123 to %Array** + store %Array* %37, %Array** %110, align 8 + store %Array* %46, %Array** %112, align 8 + store %Array* %55, %Array** %114, align 8 + store %Array* %64, %Array** %116, align 8 + store %Array* %73, %Array** %118, align 8 + store %Array* %82, %Array** %120, align 8 + store %Array* %91, %Array** %122, align 8 + store %Array* %100, %Array** %124, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %125 = phi i64 [ 0, %entry ], [ %130, %exiting__1 ] + %126 = icmp sle i64 %125, 7 + br i1 %126, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %125) + %128 = bitcast i8* %127 to %Array** + %129 = load %Array*, %Array** %128, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %129, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %130 = add i64 %125, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %131 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %132 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %133 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) 
to i64)) + %134 = bitcast %Tuple* %133 to { %Array*, %Array* }* + %135 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %134, i32 0, i32 0 + %136 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %134, i32 0, i32 1 + store %Array* %131, %Array** %135, align 8 + store %Array* %132, %Array** %136, align 8 + %137 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %138 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, %Array* }*, %Array* }* getelementptr ({ { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* null, i32 1) to i64)) + %139 = bitcast %Tuple* %138 to { { %Array*, %Array* }*, %Array* }* + %140 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %139, i32 0, i32 0 + %141 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %139, i32 0, i32 1 + store { %Array*, %Array* }* %134, { %Array*, %Array* }** %140, align 8 + store %Array* %137, %Array** %141, align 8 + %142 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %143 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %142, i64 0) + %144 = bitcast i8* %143 to { { %Array*, %Array* }*, %Array* }** + %145 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %142, i64 1) + %146 = bitcast i8* %145 to { { %Array*, %Array* }*, %Array* }** + %147 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %142, i64 2) + %148 = bitcast i8* %147 to { { %Array*, %Array* }*, %Array* }** + %149 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %142, i64 3) + %150 = bitcast i8* %149 to { { %Array*, %Array* }*, %Array* }** + %151 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %142, i64 4) + %152 = bitcast i8* %151 to { { %Array*, %Array* }*, %Array* }** + %153 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %142, i64 5) + %154 = bitcast i8* %153 to { { %Array*, %Array* }*, %Array* }** + %155 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %142, i64 6) + %156 = bitcast i8* %155 to { { %Array*, %Array* }*, %Array* }** + %157 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %142, i64 7) + %158 = bitcast i8* %157 to { { %Array*, %Array* }*, %Array* }** + store { { %Array*, %Array* }*, %Array* }* %139, { { %Array*, %Array* }*, %Array* }** %144, align 8 + store { { %Array*, %Array* }*, %Array* }* %139, { { %Array*, %Array* }*, %Array* }** %146, align 8 + store { { %Array*, %Array* }*, %Array* }* %139, { { %Array*, %Array* }*, %Array* }** %148, align 8 + store { { %Array*, %Array* }*, %Array* }* %139, { { %Array*, %Array* }*, %Array* }** %150, align 8 + store { { %Array*, %Array* }*, %Array* }* %139, { { %Array*, %Array* }*, %Array* }** %152, align 8 + store { { %Array*, %Array* }*, %Array* }* %139, { { %Array*, %Array* }*, %Array* }** %154, align 8 + store { { %Array*, %Array* }*, %Array* }* %139, { { %Array*, %Array* }*, %Array* }** %156, align 8 + store { { %Array*, %Array* }*, %Array* }* %139, { { %Array*, %Array* }*, %Array* }** %158, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %131, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %132, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %133, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %137, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %138, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* 
%131, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %132, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %133, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %137, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %138, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %131, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %132, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %133, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %137, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %138, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %131, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %132, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %133, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %137, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %138, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %131, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %132, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %133, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %137, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %138, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %131, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %132, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %133, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %137, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %138, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %131, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %132, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %133, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %137, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %138, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %131, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %132, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %133, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %137, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %138, i32 1) + store %Array* %142, %Array** %genIdxes, align 8 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %159 = phi i64 [ 0, %exit__1 ], [ %174, %exiting__2 ] + %160 = icmp sle i64 %159, 7 + br i1 %160, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %161 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %142, i64 %159) + %162 = bitcast i8* %161 to { { %Array*, %Array* }*, %Array* }** + %163 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %162, align 8 + %164 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %163, i32 0, i32 0 + %165 = load { %Array*, %Array* }*, { %Array*, %Array* }** %164, align 8 + %166 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %165, i32 0, i32 0 + %167 = load %Array*, %Array** %166, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %167, i32 1) + %168 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %165, i32 0, i32 1 + %169 = load %Array*, %Array** %168, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %169, i32 1) + %170 = bitcast { %Array*, %Array* }* %165 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %170, i32 1) + %171 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %163, i32 0, i32 1 + %172 = load %Array*, %Array** %171, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %172, i32 1) + %173 = bitcast { { %Array*, %Array* }*, %Array* }* %163 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %173, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %174 = add i64 %159, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %142, i32 1) + store i64 0, i64* %nonZero, align 4 + %175 = call %Range @Microsoft__Quantum__Arrays___17428b9baa64414f9b1034cbf7f4a1b5_IndexRange__body(%Array* %ops) + %176 = extractvalue %Range %175, 0 + %177 = extractvalue %Range %175, 1 + %178 = extractvalue %Range %175, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__2 + %179 = icmp sgt i64 %177, 0 + br label %header__3 + +header__3: ; preds = %exiting__3, %preheader__1 + %idxOp = phi i64 [ %176, %preheader__1 ], [ %249, %exiting__3 ] + %180 = icmp sle i64 %idxOp, %178 + %181 = icmp sge i64 %idxOp, %178 + %182 = select i1 %179, i1 %180, i1 %181 + br i1 %182, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %183 = srem i64 %idxOp, 4 + %184 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %v0123, i64 %183) + %185 = bitcast i8* %184 to double* + %186 = load double, double* %185, align 8 + %187 = call i1 @Microsoft__Quantum__Chemistry__IsNotZero__body(double %186) + br i1 %187, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__3 + %188 = srem i64 %idxOp, 4 + %189 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %v0123, i64 %188) + %190 = bitcast i8* %189 to double* + %191 = load double, double* %190, align 8 + %newCoeff = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %192 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoeff, i64 0) + %193 = bitcast i8* %192 to double* + store double %191, double* %193, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %newCoeff, i32 1) + %194 = load %Array*, %Array** %genIdxes, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %194, i32 -1) + %195 = call %Array* @__quantum__rt__array_copy(%Array* %194, i1 false) + %196 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %idxOp) + %197 = bitcast i8* %196 to %Array** + %198 = load %Array*, %Array** %197, align 8 + %199 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubitsPQJW) + %200 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubitsRSJW) + %201 = add i64 %199, %200 + %202 = call %Array* @Microsoft__Quantum__Arrays___776c59d7915545a6a81beb8cdb98d2a4_ConstantArray__body(i64 %201, i64 3) + %203 = call %Array* @__quantum__rt__array_concatenate(%Array* %198, %Array* %202) + call void @__quantum__rt__array_update_reference_count(%Array* %203, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %202, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %203, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 1) + %204 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %205 = bitcast %Tuple* %204 to { %Array*, %Array* }* + %206 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %205, i32 0, i32 0 + %207 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %205, i32 0, i32 1 + store %Array* %203, %Array** %206, align 8 + store %Array* %newCoeff, %Array** %207, align 8 + %208 = load i64, i64* %14, align 4 + %209 = load i64, i64* %16, align 4 + %210 = load i64, i64* %24, align 4 + %211 = load i64, i64* %26, align 4 + %212 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %213 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %212, i64 0) + %214 = bitcast i8* %213 to i64* + %215 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %212, i64 1) + %216 = bitcast i8* %215 to i64* + %217 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %212, i64 2) + %218 = bitcast i8* %217 to i64* + %219 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %212, i64 3) + %220 = bitcast i8* %219 to i64* + store i64 %208, i64* %214, align 4 + store i64 %209, i64* %216, align 4 + store i64 %210, i64* %218, align 4 + store i64 %211, i64* %220, align 4 + %221 = call %Array* @__quantum__rt__array_concatenate(%Array* %212, %Array* %qubitsPQJW) + call void @__quantum__rt__array_update_reference_count(%Array* %221, i32 1) + %222 = call %Array* @__quantum__rt__array_concatenate(%Array* %221, %Array* %qubitsRSJW) + call void @__quantum__rt__array_update_reference_count(%Array* %222, i32 1) + %223 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %205, %Array* %222) + %224 = load i64, i64* %nonZero, align 4 + %225 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %195, i64 %224) + %226 = bitcast i8* %225 to { { %Array*, %Array* }*, %Array* }** + %227 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %223, i32 0, i32 0 + %228 = load { %Array*, %Array* }*, { %Array*, %Array* }** %227, align 8 + %229 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %228, i32 0, i32 0 + %230 = load %Array*, %Array** %229, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %230, i32 1) + %231 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %228, i32 0, i32 1 + %232 = load %Array*, %Array** %231, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %232, i32 1) + %233 = bitcast { %Array*, %Array* }* %228 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %233, i32 1) + %234 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %223, i32 0, i32 1 + %235 = load %Array*, %Array** %234, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %235, i32 1) + %236 = bitcast { { %Array*, %Array* }*, %Array* }* %223 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %236, i32 1) + %237 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %226, align 8 + %238 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %237, i32 0, i32 0 + %239 = load { %Array*, %Array* }*, { %Array*, %Array* }** %238, align 8 + %240 = getelementptr inbounds { 
%Array*, %Array* }, { %Array*, %Array* }* %239, i32 0, i32 0 + %241 = load %Array*, %Array** %240, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %241, i32 -1) + %242 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %239, i32 0, i32 1 + %243 = load %Array*, %Array** %242, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %243, i32 -1) + %244 = bitcast { %Array*, %Array* }* %239 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %244, i32 -1) + %245 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %237, i32 0, i32 1 + %246 = load %Array*, %Array** %245, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %246, i32 -1) + %247 = bitcast { { %Array*, %Array* }*, %Array* }* %237 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %247, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %241, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %243, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %244, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %246, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %247, i32 -1) + store { { %Array*, %Array* }*, %Array* }* %223, { { %Array*, %Array* }*, %Array* }** %226, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %195, i32 1) + store %Array* %195, %Array** %genIdxes, align 8 + %248 = add i64 %224, 1 + store i64 %248, i64* %nonZero, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %newCoeff, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %194, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %203, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %newCoeff, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %204, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %212, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %221, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %221, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %222, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %222, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__3 + br label %exiting__3 + +exiting__3: ; preds = %continue__1 + %249 = add i64 %idxOp, %177 + br label %header__3 + +exit__3: ; preds = %header__3 + %250 = load %Array*, %Array** %genIdxes, align 8 + %251 = load i64, i64* %nonZero, align 4 + %252 = sub i64 %251, 1 + %253 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %252, 2 + %254 = call %Array* @__quantum__rt__array_slice_1d(%Array* %250, %Range %253, i1 true) + %255 = call i64 @__quantum__rt__array_get_size_1d(%Array* %254) + %256 = sub i64 %255, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %257 = phi i64 [ 0, %exit__3 ], [ %272, %exiting__4 ] + %258 = icmp sle i64 %257, %256 + br i1 %258, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %259 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %254, i64 %257) + %260 = bitcast i8* %259 to { { %Array*, %Array* }*, %Array* }** + %261 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %260, align 8 + %262 
= getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %261, i32 0, i32 0 + %263 = load { %Array*, %Array* }*, { %Array*, %Array* }** %262, align 8 + %264 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %263, i32 0, i32 0 + %265 = load %Array*, %Array** %264, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %265, i32 1) + %266 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %263, i32 0, i32 1 + %267 = load %Array*, %Array** %266, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %267, i32 1) + %268 = bitcast { %Array*, %Array* }* %263 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %268, i32 1) + %269 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %261, i32 0, i32 1 + %270 = load %Array*, %Array** %269, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %270, i32 1) + %271 = bitcast { { %Array*, %Array* }*, %Array* }* %261 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %271, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %272 = add i64 %257, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %254, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %v0123, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %v0123, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsPQ, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsRS, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsPQJW, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsRSJW, i32 -1) + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %273 = phi i64 [ 0, %exit__4 ], [ %278, %exiting__5 ] + %274 = icmp sle i64 %273, 7 + br i1 %274, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %275 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %273) + %276 = bitcast i8* %275 to %Array** + %277 = load %Array*, %Array** %276, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %277, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %278 = add i64 %273, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + %279 = call i64 @__quantum__rt__array_get_size_1d(%Array* %250) + %280 = sub i64 %279, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %281 = phi i64 [ 0, %exit__5 ], [ %296, %exiting__6 ] + %282 = icmp sle i64 %281, %280 + br i1 %282, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %283 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %250, i64 %281) + %284 = bitcast i8* %283 to { { %Array*, %Array* }*, %Array* }** + %285 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, 
%Array* }*, %Array* }** %284, align 8 + %286 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %285, i32 0, i32 0 + %287 = load { %Array*, %Array* }*, { %Array*, %Array* }** %286, align 8 + %288 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %287, i32 0, i32 0 + %289 = load %Array*, %Array** %288, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %289, i32 -1) + %290 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %287, i32 0, i32 1 + %291 = load %Array*, %Array** %290, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %291, i32 -1) + %292 = bitcast { %Array*, %Array* }* %287 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %292, i32 -1) + %293 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %285, i32 0, i32 1 + %294 = load %Array*, %Array** %293, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %294, i32 -1) + %295 = bitcast { { %Array*, %Array* }*, %Array* }* %285 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %295, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %296 = add i64 %281, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %250, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsPQ, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsRS, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsPQJW, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsRSJW, i32 -1) + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %297 = phi i64 [ 0, %exit__6 ], [ %302, %exiting__7 ] + %298 = icmp sle i64 %297, 7 + br i1 %298, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %299 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %297) + %300 = bitcast i8* %299 to %Array** + %301 = load %Array*, %Array** %300, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %301, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %302 = add i64 %297, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %131, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %132, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %133, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %137, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %138, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %254, i32 -1) + %303 = sub i64 %279, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %304 = phi i64 [ 0, %exit__7 ], [ %319, %exiting__8 ] + %305 = icmp sle i64 %304, %303 + br i1 %305, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %306 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %250, i64 %304) + %307 = bitcast i8* %306 to { { %Array*, %Array* }*, %Array* }** + %308 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %307, align 8 + %309 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %308, i32 0, i32 0 + %310 = load { 
%Array*, %Array* }*, { %Array*, %Array* }** %309, align 8 + %311 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %310, i32 0, i32 0 + %312 = load %Array*, %Array** %311, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %312, i32 -1) + %313 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %310, i32 0, i32 1 + %314 = load %Array*, %Array** %313, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %314, i32 -1) + %315 = bitcast { %Array*, %Array* }* %310 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %315, i32 -1) + %316 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %308, i32 0, i32 1 + %317 = load %Array*, %Array** %316, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %317, i32 -1) + %318 = bitcast { { %Array*, %Array* }*, %Array* }* %308 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %318, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %319 = add i64 %304, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %250, i32 -1) + ret %Array* %254 +} + +define internal %Range @Microsoft__Quantum__Arrays___17428b9baa64414f9b1034cbf7f4a1b5_IndexRange__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %8 = sub i64 %0, 1 + %9 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %8, 2 + %10 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %11 = phi i64 [ 0, %exit__1 ], [ %16, %exiting__2 ] + %12 = icmp sle i64 %11, %10 + br i1 %12, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %11) + %14 = bitcast i8* %13 to %Array** + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %16 = add i64 %11, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %9 +} + +declare %Array* @__quantum__rt__array_copy(%Array*, i1) + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ZTermToPauliGenIdx____body({ { %Array*, %Array* }*, %Array* }* %term) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %coeff = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %7, i64 0) + %9 = bitcast i8* %8 to i64* + store i64 3, i64* %9, align 4 + call void @__quantum__rt__array_update_reference_count(%Array* %coeff, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Array* }* + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 1 + store %Array* %7, %Array** %12, align 8 + store %Array* %coeff, %Array** %13, align 8 + %14 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %11, %Array* %idxFermions) + call void @__quantum__rt__array_update_reference_count(%Array* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 0) + %17 = bitcast i8* %16 to { { %Array*, %Array* }*, %Array* }** + store { { %Array*, %Array* }*, %Array* }* %14, { { %Array*, %Array* }*, %Array* }** %17, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + ret %Array* %15 +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ZZTermToPauliGenIdx____body({ { %Array*, %Array* }*, %Array* }* %term) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr 
inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %coeff = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %7, i64 0) + %9 = bitcast i8* %8 to i64* + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %7, i64 1) + %11 = bitcast i8* %10 to i64* + store i64 3, i64* %9, align 4 + store i64 3, i64* %11, align 4 + call void @__quantum__rt__array_update_reference_count(%Array* %coeff, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Array* }* + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 1 + store %Array* %7, %Array** %14, align 8 + store %Array* %coeff, %Array** %15, align 8 + %16 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %13, %Array* %idxFermions) + call void @__quantum__rt__array_update_reference_count(%Array* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + %17 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 0) + %19 = bitcast i8* %18 to { { %Array*, %Array* }*, %Array* }** + store { { %Array*, %Array* }*, %Array* }* %16, { { %Array*, %Array* }*, %Array* }** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + ret %Array* %17 +} + +define internal { { %Array*, %Array* }*, %Array* }* 
@Microsoft__Quantum__Chemistry__JordanWigner____QsRef0__JordanWignerStateAsGeneratorIndex____body(%Array* %data, i64 %idx) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %data) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %2) + %5 = bitcast i8* %4 to { { double, double }*, %Array* }** + %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0 + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %idx) + %15 = bitcast i8* %14 to { { double, double }*, %Array* }** + %16 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %15, align 8 + %17 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %16, i32 0, i32 0 + %18 = load { double, double }*, { double, double }** %17, align 8 + %19 = getelementptr inbounds { double, double }, { double, double }* %18, i32 0, i32 0 + %real = load double, double* %19, align 8 + %20 = getelementptr inbounds { double, double }, { double, double }* %18, i32 0, i32 1 + %imaginary = load double, double* %20, align 8 + %21 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %16, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %22 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions) + %23 = icmp eq i64 %22, 2 + br i1 %23, label %then0__1, label %test1__1 + +then0__1: ; preds = %exit__1 + %24 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %24, i64 0) + %26 = bitcast i8* %25 to i64* + store i64 0, i64* %26, align 4 + %27 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %27, i64 0) + %29 = bitcast i8* %28 to double* + store double %real, double* %29, align 8 + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %31 = bitcast %Tuple* %30 to { %Array*, %Array* }* + %32 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %31, i32 0, i32 0 + %33 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %31, i32 0, i32 1 + store %Array* %24, %Array** %32, 
align 8 + store %Array* %27, %Array** %33, align 8 + %34 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %31, %Array* %idxFermions) + %35 = sub i64 %0, 1 + br label %header__2 + +test1__1: ; preds = %exit__1 + %36 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions) + %37 = icmp eq i64 %36, 4 + br i1 %37, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %38 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 0) + %40 = bitcast i8* %39 to i64* + store i64 2, i64* %40, align 4 + %41 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %41, i64 0) + %43 = bitcast i8* %42 to double* + store double %real, double* %43, align 8 + %44 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %45 = bitcast %Tuple* %44 to { %Array*, %Array* }* + %46 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %45, i32 0, i32 0 + %47 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %45, i32 0, i32 1 + store %Array* %38, %Array** %46, align 8 + store %Array* %41, %Array** %47, align 8 + %48 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %45, %Array* %idxFermions) + %49 = sub i64 %0, 1 + br label %header__3 + +else__1: ; preds = %test1__1 + %50 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 0) + %52 = bitcast i8* %51 to i64* + store i64 -1, i64* %52, align 4 + %53 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 0) + %55 = bitcast i8* %54 to double* + store double 0.000000e+00, double* %55, align 8 + %56 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %57 = bitcast %Tuple* %56 to { %Array*, %Array* }* + %58 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %57, i32 0, i32 0 + %59 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %57, i32 0, i32 1 + store %Array* %50, %Array** %58, align 8 + store %Array* %53, %Array** %59, align 8 + %60 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 0) + %62 = bitcast i8* %61 to i64* + store i64 0, i64* %62, align 4 + %63 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %57, %Array* %60) + %64 = sub i64 %0, 1 + br label %header__4 + +continue__1: ; No predecessors! 
+ unreachable + +header__2: ; preds = %exiting__2, %then0__1 + %65 = phi i64 [ 0, %then0__1 ], [ %76, %exiting__2 ] + %66 = icmp sle i64 %65, %35 + br i1 %66, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %65) + %68 = bitcast i8* %67 to { { double, double }*, %Array* }** + %69 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %68, align 8 + %70 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %69, i32 0, i32 0 + %71 = load { double, double }*, { double, double }** %70, align 8 + %72 = bitcast { double, double }* %71 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %72, i32 -1) + %73 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %69, i32 0, i32 1 + %74 = load %Array*, %Array** %73, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %74, i32 -1) + %75 = bitcast { { double, double }*, %Array* }* %69 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %75, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %76 = add i64 %65, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %34 + +header__3: ; preds = %exiting__3, %then1__1 + %77 = phi i64 [ 0, %then1__1 ], [ %88, %exiting__3 ] + %78 = icmp sle i64 %77, %49 + br i1 %78, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %77) + %80 = bitcast i8* %79 to { { double, double }*, %Array* }** + %81 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %80, align 8 + %82 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %81, i32 0, i32 0 + %83 = load { double, double }*, { double, double }** %82, align 8 + %84 = bitcast { double, double }* %83 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %84, i32 -1) + %85 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %81, i32 0, i32 1 + %86 = load %Array*, %Array** %85, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %86, i32 -1) + %87 = bitcast { { double, double }*, %Array* }* %81 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %87, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %88 = add i64 %77, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %38, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %44, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %48 + +header__4: ; preds = %exiting__4, %else__1 + %89 = phi i64 [ 0, %else__1 ], [ %100, %exiting__4 ] + %90 = icmp sle i64 %89, 
%64 + br i1 %90, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %89) + %92 = bitcast i8* %91 to { { double, double }*, %Array* }** + %93 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %92, align 8 + %94 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %93, i32 0, i32 0 + %95 = load { double, double }*, { double, double }** %94, align 8 + %96 = bitcast { double, double }* %95 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %96, i32 -1) + %97 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %93, i32 0, i32 1 + %98 = load %Array*, %Array** %97, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %98, i32 -1) + %99 = bitcast { { double, double }*, %Array* }* %93 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %99, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %100 = add i64 %89, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %50, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %63 +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQandPQQRTerm___body({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %coeff = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %8 = bitcast i8* %7 to double* + %9 = load double, double* %8, align 8 + %10 = fmul double 1.000000e+00, %9 + %angle = fmul double %10, %stepSize + 
%11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %12 = bitcast i8* %11 to i64* + %qubitQidx = load i64, i64* %12, align 4 + %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions) + %14 = icmp eq i64 %13, 2 + br i1 %14, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 1) + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 0) + %17 = bitcast i8* %16 to double* + store double 1.000000e+00, double* %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, %Array* }* + %20 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %19, i32 0, i32 1 + store %Array* %idxTermType, %Array** %20, align 8 + store %Array* %15, %Array** %21, align 8 + %termPR0 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %19, %Array* %idxFermions) + %22 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %termPR0, i32 0, i32 0 + %23 = load { %Array*, %Array* }*, { %Array*, %Array* }** %22, align 8 + %24 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %23, i32 0, i32 0 + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 1) + %26 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %23, i32 0, i32 1 + %27 = load %Array*, %Array** %26, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 1) + %28 = bitcast { %Array*, %Array* }* %23 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + %29 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %termPR0, i32 0, i32 1 + %30 = load %Array*, %Array** %29, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %30, i32 1) + %31 = bitcast { { %Array*, %Array* }*, %Array* }* %termPR0 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %31, i32 1) + %32 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQTerm___body({ { %Array*, %Array* }*, %Array* }* %termPR0, double %angle, %Array* %32, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %30, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %31, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %30, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %continue__1 + +else__1: ; preds = %entry + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %34 = bitcast i8* %33 to i64* + %35 = load i64, i64* %34, align 4 + %36 = icmp slt i64 %35, %qubitQidx + br i1 %36, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %else__1 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %38 = bitcast i8* %37 to i64* + %39 = load i64, i64* %38, align 4 + %40 = icmp slt i64 %qubitQidx, %39 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %else__1 + %41 = phi i1 [ %40, %condTrue__1 ], [ %36, %else__1 ] + br i1 %41, label %then0__2, label %else__2 + +then0__2: ; preds = %condContinue__1 + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 1) + %42 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %42, i64 0) + %44 = bitcast i8* %43 to double* + store double 1.000000e+00, double* %44, align 8 + %45 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %46 = bitcast %Tuple* %45 to { %Array*, %Array* }* + %47 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %46, i32 0, i32 0 + %48 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %46, i32 0, i32 1 + store %Array* %idxTermType, %Array** %47, align 8 + store %Array* %42, %Array** %48, align 8 + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %50 = bitcast i8* %49 to i64* + %51 = load i64, i64* %50, align 4 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %53 = bitcast i8* %52 to i64* + %54 = load i64, i64* %53, align 4 + %55 = sub i64 %54, 1 + %56 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 0) + %58 = bitcast i8* %57 to i64* + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 1) + %60 = bitcast i8* %59 to i64* + store i64 %51, i64* %58, align 4 + store i64 %55, i64* %60, align 4 + %termPR1 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %46, %Array* %56) + %61 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %termPR1, i32 0, i32 0 + %62 = load { %Array*, %Array* }*, { %Array*, %Array* }** %61, align 8 + %63 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %62, i32 0, i32 0 + %64 = load %Array*, %Array** %63, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + %65 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %62, i32 0, i32 1 + %66 = load %Array*, %Array** %65, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 1) + %67 = bitcast { %Array*, %Array* }* %62 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %67, i32 1) + %68 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %termPR1, i32 0, i32 1 + %69 = load %Array*, %Array** %68, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %69, i32 1) + %70 = bitcast { { %Array*, %Array* }*, %Array* }* %termPR1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %70, i32 1) + %71 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %72 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %73 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %72, i64 0) + %74 = bitcast i8* %73 to i64* + store i64 %qubitQidx, i64* %74, align 4 + %75 = call %Array* @Microsoft__Quantum__Arrays___d4fd6982c609481a8ea49bf9914e223e_Excluding__body(%Array* %72, %Array* %qubits) + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQTerm___body({ { %Array*, %Array* }*, %Array* }* %termPR1, double %angle, %Array* %71, %Array* %75) + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %67, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %69, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %70, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %42, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %56, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %64, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %66, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %67, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %70, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %71, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %72, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %75, i32 -1) + br label %continue__2 + +else__2: ; preds = %condContinue__1 + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 1) + %76 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %77 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %76, i64 0) + %78 = bitcast i8* %77 to double* + store double 1.000000e+00, double* %78, align 8 + %79 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %80 = bitcast %Tuple* %79 to { %Array*, %Array* }* + %81 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %80, i32 0, i32 0 + %82 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %80, i32 0, i32 1 + store %Array* %idxTermType, %Array** %81, align 8 + store %Array* %76, %Array** %82, align 8 + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %84 = bitcast i8* %83 to i64* + %85 = load i64, i64* %84, align 4 + %86 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %87 = bitcast i8* %86 to i64* + %88 = load i64, i64* %87, align 4 + %89 = sub i64 %85, %88 + %90 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %90, i64 0) + %92 = bitcast i8* %91 to i64* + %93 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %90, i64 1) + %94 = bitcast i8* %93 to i64* + store i64 0, i64* %92, align 4 + store i64 %89, i64* %94, align 4 + %termPR1__1 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %80, %Array* %90) + %95 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %termPR1__1, i32 0, i32 0 + %96 = load { %Array*, %Array* }*, { %Array*, %Array* }** %95, align 8 + %97 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %96, i32 0, i32 0 + %98 = load %Array*, %Array** %97, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %98, i32 1) + %99 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %96, i32 0, i32 1 + %100 = load %Array*, %Array** %99, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %100, i32 1) + %101 = bitcast { %Array*, %Array* }* %96 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %101, i32 1) + %102 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %termPR1__1, i32 0, i32 1 + %103 = load %Array*, %Array** %102, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %103, i32 1) + %104 = bitcast { { %Array*, %Array* }*, %Array* }* %termPR1__1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %104, i32 1) + %105 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %qubitQidx) + %106 = bitcast i8* %105 to %Qubit** + %107 = load %Qubit*, %Qubit** %106, align 8 + %108 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %108, i64 0) + %110 = bitcast i8* %109 to %Qubit** + store %Qubit* %107, %Qubit** %110, align 8 + %111 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %112 = bitcast i8* %111 to i64* + %113 = load i64, i64* %112, align 4 + %114 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %115 = bitcast i8* %114 to i64* + %116 = load i64, i64* %115, align 4 + %117 = insertvalue %Range zeroinitializer, i64 %113, 0 + %118 = insertvalue %Range %117, i64 1, 1 + %119 = insertvalue %Range %118, i64 %116, 2 + %120 = call %Array* @__quantum__rt__array_slice_1d(%Array* %qubits, %Range %119, i1 true) + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQTerm___body({ { %Array*, %Array* }*, %Array* }* %termPR1__1, double %angle, %Array* %108, %Array* %120) + call void @__quantum__rt__array_update_alias_count(%Array* %98, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %100, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %101, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %103, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %104, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %76, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %90, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %98, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %100, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %101, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %103, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %104, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %108, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__2, %then0__2 + br label %continue__1 + +continue__1: ; preds = %continue__2, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQTerm___body({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %extraParityQubits, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %coeff = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %extraParityQubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %8 = bitcast i8* %7 to double* + %9 = load double, double* %8, align 8 + %10 = fmul double 1.000000e+00, %9 + %angle = fmul double %10, %stepSize + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %12 = bitcast i8* %11 to i64* + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %14 = bitcast i8* %13 to i64* + %15 = load i64, i64* %12, align 4 + %16 = load i64, i64* %14, align 4 + %17 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %18 
= call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 0) + %19 = bitcast i8* %18 to i64* + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 1) + %21 = bitcast i8* %20 to i64* + store i64 %15, i64* %19, align 4 + store i64 %16, i64* %21, align 4 + %qubitsPQ = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %17, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsPQ, i32 1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %23 = bitcast i8* %22 to i64* + %24 = load i64, i64* %23, align 4 + %25 = add i64 %24, 1 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %27 = bitcast i8* %26 to i64* + %28 = load i64, i64* %27, align 4 + %29 = sub i64 %28, 1 + %30 = insertvalue %Range zeroinitializer, i64 %25, 0 + %31 = insertvalue %Range %30, i64 1, 1 + %32 = insertvalue %Range %31, i64 %29, 2 + %qubitsJW = call %Array* @__quantum__rt__array_slice_1d(%Array* %qubits, %Range %32, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsJW, i32 1) + %33 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 0) + %35 = bitcast i8* %34 to i2* + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 1) + %37 = bitcast i8* %36 to i2* + store i2 1, i2* %35, align 1 + store i2 1, i2* %37, align 1 + %38 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 0) + %40 = bitcast i8* %39 to i2* + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 1) + %42 = bitcast i8* %41 to i2* + store i2 -1, i2* %40, align 1 + store i2 -1, i2* %42, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %44 = bitcast i8* %43 to %Array** + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %46 = bitcast i8* %45 to %Array** + store %Array* %33, %Array** %44, align 8 + store %Array* %38, %Array** %46, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %47 = phi i64 [ 0, %entry ], [ %52, %exiting__1 ] + %48 = icmp sle i64 %47, 1 + br i1 %48, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %47) + %50 = bitcast i8* %49 to %Array** + %51 = load %Array*, %Array** %50, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %51, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %52 = add i64 %47, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %53 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubitsJW) + %54 = call i64 @__quantum__rt__array_get_size_1d(%Array* %extraParityQubits) + %55 = add i64 %53, %54 + %padding = call %Array* @Microsoft__Quantum__Arrays___041f4b97ff464238a7f7da162bd94e8e_ConstantArray__body(i64 %55, i2 -2) + call void @__quantum__rt__array_update_alias_count(%Array* %padding, i32 1) + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %56 = phi i64 [ 0, %exit__1 ], [ %61, %exiting__2 ] + %57 = icmp sle i64 %56, 1 + br i1 %57, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %58 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %56) + %59 = bitcast i8* %58 to %Array** + %op = load %Array*, %Array** %59, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %paulis = call %Array* @__quantum__rt__array_concatenate(%Array* %op, %Array* %padding) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %60 = call %Array* @__quantum__rt__array_concatenate(%Array* %qubitsPQ, %Array* %qubitsJW) + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 1) + %qubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %60, %Array* %extraParityQubits) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %angle, %Array* %qubits__1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %61 = add i64 %56, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %extraParityQubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsPQ, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsJW, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %62 = phi i64 [ 0, %exit__2 ], [ %67, %exiting__3 ] + %63 = icmp sle i64 %62, 1 + br i1 %63, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %62) + %65 = bitcast i8* %64 to %Array** + %66 = load %Array*, %Array** %65, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %67 = add i64 %62, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %padding, 
i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsPQ, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsJW, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %68 = phi i64 [ 0, %exit__3 ], [ %73, %exiting__4 ] + %69 = icmp sle i64 %68, 1 + br i1 %69, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %68) + %71 = bitcast i8* %70 to %Array** + %72 = load %Array*, %Array** %71, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %72, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %73 = add i64 %68, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %padding, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___d4fd6982c609481a8ea49bf9914e223e_Excluding__body(%Array* %remove, %Array* %array) { +entry: + %counter = alloca i64, align 8 + %sliced = alloca %Array*, align 8 + %arrayKeep = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %remove, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %nSliced = call i64 @__quantum__rt__array_get_size_1d(%Array* %remove) + %nElements = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %nElements, %nSliced + %1 = icmp sle i64 %0, 0 + br i1 %1, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %2 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %remove, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %2 + +continue__1: ; preds = %entry + %3 = sub i64 %nElements, 1 + %4 = call %Array* @Microsoft__Quantum__Arrays__SequenceI__body(i64 0, i64 %3) + store %Array* %4, %Array** %arrayKeep, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %6 = bitcast i8* %5 to %Qubit** + %7 = load %Qubit*, %Qubit** %6, align 8 + %8 = sub i64 %nElements, %nSliced + %9 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %8) + %10 = sub i64 %8, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %11 = phi i64 [ 0, %continue__1 ], [ %15, %exiting__1 ] + %12 = icmp sle i64 %11, %10 + br i1 %12, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %9, i64 %11) + %14 = bitcast i8* %13 to %Qubit** + store %Qubit* %7, %Qubit** %14, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %11, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %9, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 1) + store i64 0, i64* %counter, align 4 + %16 = sub i64 %nSliced, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %17 = phi i64 [ 0, %exit__1 ], [ %25, %exiting__2 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %remove, i64 %17) + %20 = bitcast 
i8* %19 to i64* + %idx = load i64, i64* %20, align 4 + %21 = load %Array*, %Array** %arrayKeep, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 -1) + %22 = call %Array* @__quantum__rt__array_copy(%Array* %21, i1 false) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %22, i64 %idx) + %24 = bitcast i8* %23 to i64* + store i64 -1, i64* %24, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %22, i32 1) + store %Array* %22, %Array** %arrayKeep, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %25 = add i64 %17, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %26 = load %Array*, %Array** %arrayKeep, align 8 + %27 = call i64 @__quantum__rt__array_get_size_1d(%Array* %26) + %28 = sub i64 %27, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %43, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %26, i64 %29) + %32 = bitcast i8* %31 to i64* + %idx__1 = load i64, i64* %32, align 4 + %33 = icmp sge i64 %idx__1, 0 + br i1 %33, label %then0__2, label %continue__2 + +then0__2: ; preds = %body__3 + %34 = load %Array*, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + %35 = call %Array* @__quantum__rt__array_copy(%Array* %34, i1 false) + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx__1) + %37 = bitcast i8* %36 to %Qubit** + %38 = load %Qubit*, %Qubit** %37, align 8 + %39 = load i64, i64* %counter, align 4 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %35, i64 %39) + %41 = bitcast i8* %40 to %Qubit** + store %Qubit* %38, %Qubit** %41, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %35, i32 1) + store %Array* %35, %Array** %sliced, align 8 + %42 = add i64 %39, 1 + store i64 %42, i64* %counter, align 4 + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + br label %continue__2 + +continue__2: ; preds = %then0__2, %body__3 + br label %exiting__3 + +exiting__3: ; preds = %continue__2 + %43 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %44 = load %Array*, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %remove, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %26, i32 -1) + ret %Array* %44 +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQandPQQRTerm___adj({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* 
%1, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %8 = bitcast i8* %7 to double* + %9 = load double, double* %8, align 8 + %10 = fmul double 1.000000e+00, %9 + %__qsVar3__angle__ = fmul double %10, %stepSize + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %12 = bitcast i8* %11 to i64* + %__qsVar4__qubitQidx__ = load i64, i64* %12, align 4 + %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar2__idxFermions__) + %14 = icmp eq i64 %13, 2 + br i1 %14, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 1) + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 0) + %17 = bitcast i8* %16 to double* + store double 1.000000e+00, double* %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, %Array* }* + %20 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %19, i32 0, i32 1 + store %Array* %__qsVar0__idxTermType__, %Array** %20, align 8 + store %Array* %15, %Array** %21, align 8 + %__qsVar5__termPR0__ = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %19, %Array* %__qsVar2__idxFermions__) + %22 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar5__termPR0__, i32 0, i32 0 + %23 = load { %Array*, %Array* }*, { %Array*, %Array* }** %22, align 8 + %24 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %23, i32 0, i32 0 + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 1) + %26 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %23, i32 0, i32 1 + %27 = load %Array*, %Array** %26, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 1) + %28 = bitcast { %Array*, %Array* }* %23 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + %29 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, 
%Array* }* %__qsVar5__termPR0__, i32 0, i32 1 + %30 = load %Array*, %Array** %29, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %30, i32 1) + %31 = bitcast { { %Array*, %Array* }*, %Array* }* %__qsVar5__termPR0__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %31, i32 1) + %32 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQTerm___adj({ { %Array*, %Array* }*, %Array* }* %__qsVar5__termPR0__, double %__qsVar3__angle__, %Array* %32, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %30, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %31, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %30, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %continue__1 + +else__1: ; preds = %entry + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %34 = bitcast i8* %33 to i64* + %35 = load i64, i64* %34, align 4 + %36 = icmp slt i64 %35, %__qsVar4__qubitQidx__ + br i1 %36, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %else__1 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %38 = bitcast i8* %37 to i64* + %39 = load i64, i64* %38, align 4 + %40 = icmp slt i64 %__qsVar4__qubitQidx__, %39 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %else__1 + %41 = phi i1 [ %40, %condTrue__1 ], [ %36, %else__1 ] + br i1 %41, label %then0__2, label %else__2 + +then0__2: ; preds = %condContinue__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 1) + %42 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %42, i64 0) + %44 = bitcast i8* %43 to double* + store double 1.000000e+00, double* %44, align 8 + %45 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %46 = bitcast %Tuple* %45 to { %Array*, %Array* }* + %47 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %46, i32 0, i32 0 + %48 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %46, i32 0, i32 1 + store %Array* %__qsVar0__idxTermType__, %Array** %47, align 8 + store %Array* %42, %Array** %48, align 8 + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %50 = bitcast i8* %49 to i64* + %51 = load i64, i64* %50, align 4 + %52 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %53 = bitcast i8* %52 to i64* + %54 = load i64, i64* %53, align 4 + %55 = sub i64 %54, 1 + %56 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 0) + %58 = bitcast i8* %57 to i64* + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 1) + %60 = bitcast i8* %59 to i64* + store i64 %51, i64* %58, align 4 + store i64 %55, i64* %60, align 4 + %__qsVar6__termPR1__ = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %46, %Array* %56) + %61 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar6__termPR1__, i32 0, i32 0 + %62 = load { %Array*, %Array* }*, { %Array*, %Array* }** %61, align 8 + %63 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %62, i32 0, i32 0 + %64 = load %Array*, %Array** %63, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + %65 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %62, i32 0, i32 1 + %66 = load %Array*, %Array** %65, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 1) + %67 = bitcast { %Array*, %Array* }* %62 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %67, i32 1) + %68 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar6__termPR1__, i32 0, i32 1 + %69 = load %Array*, %Array** %68, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %69, i32 1) + %70 = bitcast { { %Array*, %Array* }*, %Array* }* %__qsVar6__termPR1__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %70, i32 1) + %71 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %72 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %73 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %72, i64 0) + %74 = bitcast i8* %73 to i64* + store i64 %__qsVar4__qubitQidx__, i64* %74, align 4 + %75 = call %Array* @Microsoft__Quantum__Arrays___d4fd6982c609481a8ea49bf9914e223e_Excluding__body(%Array* %72, %Array* %qubits) + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQTerm___adj({ { %Array*, %Array* }*, %Array* }* %__qsVar6__termPR1__, double %__qsVar3__angle__, %Array* %71, %Array* %75) + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %67, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %69, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %70, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %42, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %56, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %64, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %66, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %67, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %70, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %71, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %72, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %75, i32 -1) + br label %continue__2 + +else__2: ; preds = %condContinue__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 1) + %76 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %77 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %76, i64 0) + %78 = bitcast i8* %77 to double* + store double 1.000000e+00, double* %78, align 8 + %79 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %80 = bitcast %Tuple* %79 to { %Array*, %Array* }* + %81 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %80, i32 0, i32 0 + %82 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %80, i32 0, i32 1 + store %Array* %__qsVar0__idxTermType__, %Array** %81, align 8 + store %Array* %76, %Array** %82, align 8 + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %84 = bitcast i8* %83 to i64* + %85 = load i64, i64* %84, align 4 + %86 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %87 = bitcast i8* %86 to i64* + %88 = load i64, i64* %87, align 4 + %89 = sub i64 %85, %88 + %90 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %90, i64 0) + %92 = bitcast i8* %91 to i64* + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %90, i64 1) + %94 = bitcast i8* %93 to i64* + store i64 0, i64* %92, align 4 + store i64 %89, i64* %94, align 4 + %__qsVar7__termPR1__ = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %80, %Array* %90) + %95 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar7__termPR1__, i32 0, i32 0 + %96 = load { %Array*, %Array* }*, { %Array*, %Array* }** %95, align 8 + %97 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %96, i32 0, i32 0 + %98 = load %Array*, %Array** %97, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %98, i32 1) + %99 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %96, i32 0, i32 1 + %100 = load %Array*, %Array** %99, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %100, i32 1) + %101 = bitcast { %Array*, %Array* }* %96 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %101, i32 1) + %102 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar7__termPR1__, i32 0, i32 1 + %103 = load %Array*, %Array** %102, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %103, i32 1) + %104 = bitcast { { %Array*, %Array* }*, %Array* }* %__qsVar7__termPR1__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %104, i32 1) + %105 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %__qsVar4__qubitQidx__) + %106 = bitcast i8* %105 to %Qubit** + %107 = load %Qubit*, %Qubit** %106, align 8 + %108 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %109 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %108, i64 0) + %110 = bitcast i8* %109 to %Qubit** + store %Qubit* %107, %Qubit** %110, align 8 + %111 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %112 = bitcast i8* %111 to i64* + %113 = load i64, i64* %112, align 4 + %114 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %115 = bitcast i8* %114 to i64* + %116 = load i64, i64* %115, align 4 + %117 = insertvalue %Range zeroinitializer, i64 %113, 0 + %118 = insertvalue %Range %117, i64 1, 1 + %119 = insertvalue %Range %118, i64 %116, 2 + %120 = call %Array* @__quantum__rt__array_slice_1d(%Array* %qubits, %Range %119, i1 true) + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQTerm___adj({ { %Array*, %Array* }*, %Array* }* %__qsVar7__termPR1__, double %__qsVar3__angle__, %Array* %108, %Array* %120) + call void @__quantum__rt__array_update_alias_count(%Array* %98, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %100, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %101, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %103, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %104, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %76, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %90, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %98, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %100, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %101, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %103, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %104, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %108, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__2, %then0__2 + br label %continue__1 + +continue__1: ; preds = %continue__2, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQTerm___adj({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %extraParityQubits, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = 
getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %extraParityQubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %8 = bitcast i8* %7 to double* + %9 = load double, double* %8, align 8 + %10 = fmul double 1.000000e+00, %9 + %__qsVar3__angle__ = fmul double %10, %stepSize + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %12 = bitcast i8* %11 to i64* + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %14 = bitcast i8* %13 to i64* + %15 = load i64, i64* %12, align 4 + %16 = load i64, i64* %14, align 4 + %17 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 0) + %19 = bitcast i8* %18 to i64* + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 1) + %21 = bitcast i8* %20 to i64* + store i64 %15, i64* %19, align 4 + store i64 %16, i64* %21, align 4 + %__qsVar4__qubitsPQ__ = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %17, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__qubitsPQ__, i32 1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %23 = bitcast i8* %22 to i64* + %24 = load i64, i64* %23, align 4 + %25 = add i64 %24, 1 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %27 = bitcast i8* %26 to i64* + %28 = load i64, i64* %27, align 4 + %29 = sub i64 %28, 1 + %30 = insertvalue %Range zeroinitializer, i64 %25, 0 + %31 = insertvalue %Range %30, i64 1, 1 + %32 = insertvalue %Range %31, i64 %29, 2 + %__qsVar5__qubitsJW__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %qubits, %Range %32, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar5__qubitsJW__, i32 1) + %33 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 0) + %35 = bitcast i8* %34 to i2* + %36 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 1) + %37 = bitcast i8* %36 to i2* + store i2 1, i2* %35, align 1 + store i2 1, i2* %37, align 1 + %38 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 0) + %40 = bitcast i8* %39 to i2* + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 1) + %42 = bitcast i8* %41 to i2* + store i2 -1, i2* %40, align 1 + store i2 -1, i2* %42, align 1 + %__qsVar6__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 0) + %44 = bitcast i8* %43 to %Array** + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 1) + %46 = bitcast i8* %45 to %Array** + store %Array* %33, %Array** %44, align 8 + store %Array* %38, %Array** %46, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %47 = phi i64 [ 0, %entry ], [ %52, %exiting__1 ] + %48 = icmp sle i64 %47, 1 + br i1 %48, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %47) + %50 = bitcast i8* %49 to %Array** + %51 = load %Array*, %Array** %50, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %51, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %52 = add i64 %47, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 1) + %53 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar5__qubitsJW__) + %54 = call i64 @__quantum__rt__array_get_size_1d(%Array* %extraParityQubits) + %55 = add i64 %53, %54 + %__qsVar7__padding__ = call %Array* @Microsoft__Quantum__Arrays___041f4b97ff464238a7f7da162bd94e8e_ConstantArray__body(i64 %55, i2 -2) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__padding__, i32 1) + %56 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 0) + %58 = bitcast i8* %57 to %Array** + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 1) + %60 = bitcast i8* %59 to %Array** + store %Array* %38, %Array** %58, align 8 + store %Array* %33, %Array** %60, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %38, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %33, i32 1) + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %61 = phi i64 [ 0, %exit__1 ], [ %66, %exiting__2 ] + %62 = icmp sle i64 %61, 1 + br i1 %62, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 %61) + %64 = bitcast i8* %63 to %Array** + %__qsVar8__op__ = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 1) + %paulis = call %Array* @__quantum__rt__array_concatenate(%Array* %__qsVar8__op__, %Array* %__qsVar7__padding__) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %65 = call %Array* @__quantum__rt__array_concatenate(%Array* %__qsVar4__qubitsPQ__, %Array* %__qsVar5__qubitsJW__) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 1) + %qubits__1 = call %Array* 
@__quantum__rt__array_concatenate(%Array* %65, %Array* %extraParityQubits) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %__qsVar3__angle__, %Array* %qubits__1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %66 = add i64 %61, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %extraParityQubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__qubitsPQ__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar5__qubitsJW__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %67 = phi i64 [ 0, %exit__2 ], [ %72, %exiting__3 ] + %68 = icmp sle i64 %67, 1 + br i1 %68, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %67) + %70 = bitcast i8* %69 to %Array** + %71 = load %Array*, %Array** %70, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %71, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %72 = add i64 %67, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__padding__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar4__qubitsPQ__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar5__qubitsJW__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %73 = phi i64 [ 0, %exit__3 ], [ %78, %exiting__4 ] + %74 = icmp sle i64 %73, 1 + br i1 %74, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + 
%75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %73) + %76 = bitcast i8* %75 to %Array** + %77 = load %Array*, %Array** %76, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %77, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %78 = add i64 %73, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar7__padding__, i32 -1) + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %79 = phi i64 [ 0, %exit__4 ], [ %84, %exiting__5 ] + %80 = icmp sle i64 %79, 1 + br i1 %80, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 %79) + %82 = bitcast i8* %81 to %Array** + %83 = load %Array*, %Array** %82, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %83, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %84 = add i64 %79, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %56, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQandPQQRTerm___ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %coeff = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + 
call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %12 = bitcast i8* %11 to double* + %13 = load double, double* %12, align 8 + %14 = fmul double 1.000000e+00, %13 + %angle = fmul double %14, %stepSize + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %16 = bitcast i8* %15 to i64* + %qubitQidx = load i64, i64* %16, align 4 + %17 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions) + %18 = icmp eq i64 %17, 2 + br i1 %18, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 1) + %19 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 0) + %21 = bitcast i8* %20 to double* + store double 1.000000e+00, double* %21, align 8 + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %23 = bitcast %Tuple* %22 to { %Array*, %Array* }* + %24 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %23, i32 0, i32 0 + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %23, i32 0, i32 1 + store %Array* %idxTermType, %Array** %24, align 8 + store %Array* %19, %Array** %25, align 8 + %termPR0 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %23, %Array* %idxFermions) + %26 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %termPR0, i32 0, i32 0 + %27 = load { %Array*, %Array* }*, { %Array*, %Array* }** %26, align 8 + %28 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %27, i32 0, i32 0 + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 1) + %30 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %27, i32 0, i32 1 + %31 = load %Array*, %Array** %30, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %31, i32 1) + %32 = bitcast { %Array*, %Array* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %32, i32 1) + %33 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %termPR0, i32 0, i32 1 + %34 = load %Array*, %Array** %33, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 1) + %35 = bitcast { { %Array*, %Array* }*, %Array* }* %termPR0 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %35, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 1) + %36 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %37 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, 
double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* null, i32 1) to i64)) + %38 = bitcast %Tuple* %37 to { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* + %39 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %38, i32 0, i32 0 + %40 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %38, i32 0, i32 1 + %41 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %38, i32 0, i32 2 + %42 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %38, i32 0, i32 3 + store { { %Array*, %Array* }*, %Array* }* %termPR0, { { %Array*, %Array* }*, %Array* }** %39, align 8 + store double %angle, double* %40, align 8 + store %Array* %36, %Array** %41, align 8 + store %Array* %qubits, %Array** %42, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQTerm___ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %38) + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %31, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %32, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %36, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1) + br label %continue__1 + +else__1: ; preds = %entry + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %44 = bitcast i8* %43 to i64* + %45 = load i64, i64* %44, align 4 + %46 = icmp slt i64 %45, %qubitQidx + br i1 %46, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %else__1 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %48 = bitcast i8* %47 to i64* + %49 = load i64, i64* %48, align 4 + %50 = icmp slt i64 %qubitQidx, %49 + br label 
%condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %else__1 + %51 = phi i1 [ %50, %condTrue__1 ], [ %46, %else__1 ] + br i1 %51, label %then0__2, label %else__2 + +then0__2: ; preds = %condContinue__1 + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 1) + %52 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 0) + %54 = bitcast i8* %53 to double* + store double 1.000000e+00, double* %54, align 8 + %55 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %56 = bitcast %Tuple* %55 to { %Array*, %Array* }* + %57 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %56, i32 0, i32 0 + %58 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %56, i32 0, i32 1 + store %Array* %idxTermType, %Array** %57, align 8 + store %Array* %52, %Array** %58, align 8 + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %60 = bitcast i8* %59 to i64* + %61 = load i64, i64* %60, align 4 + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %63 = bitcast i8* %62 to i64* + %64 = load i64, i64* %63, align 4 + %65 = sub i64 %64, 1 + %66 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %66, i64 0) + %68 = bitcast i8* %67 to i64* + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %66, i64 1) + %70 = bitcast i8* %69 to i64* + store i64 %61, i64* %68, align 4 + store i64 %65, i64* %70, align 4 + %termPR1 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %56, %Array* %66) + %71 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %termPR1, i32 0, i32 0 + %72 = load { %Array*, %Array* }*, { %Array*, %Array* }** %71, align 8 + %73 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %72, i32 0, i32 0 + %74 = load %Array*, %Array** %73, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %74, i32 1) + %75 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %72, i32 0, i32 1 + %76 = load %Array*, %Array** %75, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 1) + %77 = bitcast { %Array*, %Array* }* %72 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %77, i32 1) + %78 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %termPR1, i32 0, i32 1 + %79 = load %Array*, %Array** %78, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %79, i32 1) + %80 = bitcast { { %Array*, %Array* }*, %Array* }* %termPR1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %74, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %76, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %77, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %79, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i32 1) + %81 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %82 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %83 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %82, i64 0) + %84 = bitcast i8* %83 to i64* + store i64 %qubitQidx, i64* %84, align 4 + %85 = call %Array* @Microsoft__Quantum__Arrays___d4fd6982c609481a8ea49bf9914e223e_Excluding__body(%Array* %82, %Array* %qubits) + call void @__quantum__rt__array_update_reference_count(%Array* %82, i32 -1) + %86 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* null, i32 1) to i64)) + %87 = bitcast %Tuple* %86 to { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* + %88 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %87, i32 0, i32 0 + %89 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %87, i32 0, i32 1 + %90 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %87, i32 0, i32 2 + %91 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %87, i32 0, i32 3 + store { { %Array*, %Array* }*, %Array* }* %termPR1, { { %Array*, %Array* }*, %Array* }** %88, align 8 + store double %angle, double* %89, align 8 + store %Array* %81, %Array** %90, align 8 + store %Array* %85, %Array** %91, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQTerm___ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %87) + call void @__quantum__rt__array_update_alias_count(%Array* %74, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %77, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %79, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %52, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %55, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %66, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %74, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %76, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %77, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %79, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %74, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %76, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %77, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %79, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %85, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %86, i32 -1) + br label %continue__2 + +else__2: ; preds = %condContinue__1 + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 1) + %92 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 0) + %94 = bitcast i8* %93 to double* + store double 1.000000e+00, double* %94, align 8 + %95 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %96 = bitcast %Tuple* %95 to { %Array*, %Array* }* + %97 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %96, i32 0, i32 0 + %98 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %96, i32 0, i32 1 + store %Array* %idxTermType, %Array** %97, align 8 + store %Array* %92, %Array** %98, align 8 + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %100 = bitcast i8* %99 to i64* + %101 = load i64, i64* %100, align 4 + %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %103 = bitcast i8* %102 to i64* + %104 = load i64, i64* %103, align 4 + %105 = sub i64 %101, %104 + %106 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %107 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %106, i64 0) + %108 = bitcast i8* %107 to i64* + %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %106, i64 1) + %110 = bitcast i8* %109 to i64* + store i64 0, i64* %108, align 4 + store i64 %105, i64* %110, align 4 + %termPR1__1 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %96, %Array* %106) + %111 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %termPR1__1, i32 0, i32 0 + %112 = load { %Array*, %Array* }*, { %Array*, %Array* }** %111, align 8 + %113 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %112, i32 0, i32 0 + %114 = load %Array*, %Array** %113, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %114, i32 1) + %115 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %112, i32 0, i32 1 + %116 = load %Array*, %Array** %115, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %116, i32 1) + %117 = bitcast { %Array*, %Array* }* %112 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %117, i32 1) + %118 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %termPR1__1, i32 0, i32 1 + %119 = load %Array*, %Array** %118, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %119, i32 1) + %120 = bitcast { { %Array*, %Array* }*, %Array* }* %termPR1__1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %120, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %114, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %116, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %117, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %119, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %120, i32 1) + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %qubitQidx) + %122 = bitcast i8* %121 to %Qubit** + %123 = load %Qubit*, %Qubit** %122, align 8 + %124 = call 
%Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %124, i64 0) + %126 = bitcast i8* %125 to %Qubit** + store %Qubit* %123, %Qubit** %126, align 8 + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %128 = bitcast i8* %127 to i64* + %129 = load i64, i64* %128, align 4 + %130 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %131 = bitcast i8* %130 to i64* + %132 = load i64, i64* %131, align 4 + %133 = insertvalue %Range zeroinitializer, i64 %129, 0 + %134 = insertvalue %Range %133, i64 1, 1 + %135 = insertvalue %Range %134, i64 %132, 2 + %136 = call %Array* @__quantum__rt__array_slice_1d(%Array* %qubits, %Range %135, i1 true) + call void @__quantum__rt__array_update_reference_count(%Array* %136, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %136, i32 -1) + %137 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* null, i32 1) to i64)) + %138 = bitcast %Tuple* %137 to { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* + %139 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %138, i32 0, i32 0 + %140 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %138, i32 0, i32 1 + %141 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %138, i32 0, i32 2 + %142 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %138, i32 0, i32 3 + store { { %Array*, %Array* }*, %Array* }* %termPR1__1, { { %Array*, %Array* }*, %Array* }** %139, align 8 + store double %angle, double* %140, align 8 + store %Array* %124, %Array** %141, align 8 + store %Array* %136, %Array** %142, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQTerm___ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %138) + call void @__quantum__rt__array_update_alias_count(%Array* %114, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %116, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %117, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %119, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %92, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %95, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %106, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %114, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %116, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %117, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %119, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %114, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %116, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %117, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %119, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %124, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %136, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %137, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__2, %then0__2 + br label %continue__1 + +continue__1: ; preds = %continue__2, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQTerm___ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %coeff = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %0, 
i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %0, i32 0, i32 2 + %extraParityQubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %extraParityQubits, i32 1) + %11 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %0, i32 0, i32 3 + %qubits = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %13 = bitcast i8* %12 to double* + %14 = load double, double* %13, align 8 + %15 = fmul double 1.000000e+00, %14 + %angle = fmul double %15, %stepSize + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %17 = bitcast i8* %16 to i64* + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %19 = bitcast i8* %18 to i64* + %20 = load i64, i64* %17, align 4 + %21 = load i64, i64* %19, align 4 + %22 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %22, i64 0) + %24 = bitcast i8* %23 to i64* + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %22, i64 1) + %26 = bitcast i8* %25 to i64* + store i64 %20, i64* %24, align 4 + store i64 %21, i64* %26, align 4 + %qubitsPQ = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %22, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsPQ, i32 1) + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %28 = bitcast i8* %27 to i64* + %29 = load i64, i64* %28, align 4 + %30 = add i64 %29, 1 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %32 = bitcast i8* %31 to i64* + %33 = load i64, i64* %32, align 4 + %34 = sub i64 %33, 1 + %35 = insertvalue %Range zeroinitializer, i64 %30, 0 + %36 = insertvalue %Range %35, i64 1, 1 + %37 = insertvalue %Range %36, i64 %34, 2 + %qubitsJW = call %Array* @__quantum__rt__array_slice_1d(%Array* %qubits, %Range %37, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsJW, i32 1) + %38 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 0) + %40 = bitcast i8* %39 to i2* + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 1) + %42 = bitcast i8* %41 to i2* + store i2 1, i2* %40, align 1 + store i2 1, i2* %42, align 1 + %43 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 0) + %45 = bitcast i8* %44 to i2* + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 1) + %47 = bitcast i8* %46 to i2* + store i2 -1, i2* %45, align 1 + store i2 -1, i2* %47, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%ops, i64 0) + %49 = bitcast i8* %48 to %Array** + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %51 = bitcast i8* %50 to %Array** + store %Array* %38, %Array** %49, align 8 + store %Array* %43, %Array** %51, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %52 = phi i64 [ 0, %entry ], [ %57, %exiting__1 ] + %53 = icmp sle i64 %52, 1 + br i1 %53, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %52) + %55 = bitcast i8* %54 to %Array** + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %57 = add i64 %52, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %58 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubitsJW) + %59 = call i64 @__quantum__rt__array_get_size_1d(%Array* %extraParityQubits) + %60 = add i64 %58, %59 + %padding = call %Array* @Microsoft__Quantum__Arrays___041f4b97ff464238a7f7da162bd94e8e_ConstantArray__body(i64 %60, i2 -2) + call void @__quantum__rt__array_update_alias_count(%Array* %padding, i32 1) + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %61 = phi i64 [ 0, %exit__1 ], [ %71, %exiting__2 ] + %62 = icmp sle i64 %61, 1 + br i1 %62, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %61) + %64 = bitcast i8* %63 to %Array** + %op = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_concatenate(%Array* %op, %Array* %padding) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %65 = call %Array* @__quantum__rt__array_concatenate(%Array* %qubitsPQ, %Array* %qubitsJW) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 1) + %qubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %65, %Array* %extraParityQubits) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + %66 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %67 = bitcast %Tuple* %66 to { %Array*, double, %Array* }* + %68 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %67, i32 0, i32 0 + %69 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %67, i32 0, i32 1 + %70 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %67, i32 0, i32 2 + store %Array* %paulis, %Array** %68, align 8 + store double %angle, double* %69, align 8 + store %Array* %qubits__1, %Array** %70, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %67) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %66, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %71 = add i64 %61, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %extraParityQubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsPQ, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsJW, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %72 = phi i64 [ 0, %exit__2 ], [ %77, %exiting__3 ] + %73 = icmp sle i64 %72, 1 + br i1 %73, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %72) + %75 = bitcast i8* %74 to %Array** + %76 = load %Array*, %Array** %75, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %77 = add i64 %72, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %padding, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsPQ, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsJW, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %78 = phi i64 [ 0, %exit__3 ], [ %83, %exiting__4 ] + %79 = icmp sle i64 %78, 1 + br i1 %79, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %78) + %81 = bitcast i8* 
%80 to %Array** + %82 = load %Array*, %Array** %81, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %82, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %83 = add i64 %78, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %padding, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQandPQQRTerm___ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %12 = bitcast i8* %11 to double* + %13 = load double, double* %12, align 8 + %14 = fmul double 1.000000e+00, %13 + %__qsVar3__angle__ = fmul double %14, %stepSize + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %16 = bitcast i8* %15 to i64* + %__qsVar4__qubitQidx__ = load i64, i64* %16, align 4 + %17 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar2__idxFermions__) + %18 = icmp eq 
i64 %17, 2 + br i1 %18, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 1) + %19 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 0) + %21 = bitcast i8* %20 to double* + store double 1.000000e+00, double* %21, align 8 + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %23 = bitcast %Tuple* %22 to { %Array*, %Array* }* + %24 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %23, i32 0, i32 0 + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %23, i32 0, i32 1 + store %Array* %__qsVar0__idxTermType__, %Array** %24, align 8 + store %Array* %19, %Array** %25, align 8 + %__qsVar5__termPR0__ = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %23, %Array* %__qsVar2__idxFermions__) + %26 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar5__termPR0__, i32 0, i32 0 + %27 = load { %Array*, %Array* }*, { %Array*, %Array* }** %26, align 8 + %28 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %27, i32 0, i32 0 + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 1) + %30 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %27, i32 0, i32 1 + %31 = load %Array*, %Array** %30, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %31, i32 1) + %32 = bitcast { %Array*, %Array* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %32, i32 1) + %33 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar5__termPR0__, i32 0, i32 1 + %34 = load %Array*, %Array** %33, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 1) + %35 = bitcast { { %Array*, %Array* }*, %Array* }* %__qsVar5__termPR0__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %35, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 1) + %36 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %37 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* null, i32 1) to i64)) + %38 = bitcast %Tuple* %37 to { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* + %39 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %38, i32 0, i32 0 + %40 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* 
}* %38, i32 0, i32 1 + %41 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %38, i32 0, i32 2 + %42 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %38, i32 0, i32 3 + store { { %Array*, %Array* }*, %Array* }* %__qsVar5__termPR0__, { { %Array*, %Array* }*, %Array* }** %39, align 8 + store double %__qsVar3__angle__, double* %40, align 8 + store %Array* %36, %Array** %41, align 8 + store %Array* %qubits, %Array** %42, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQTerm___ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %38) + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %31, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %32, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %36, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1) + br label %continue__1 + +else__1: ; preds = %entry + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %44 = bitcast i8* %43 to i64* + %45 = load i64, i64* %44, align 4 + %46 = icmp slt i64 %45, %__qsVar4__qubitQidx__ + br i1 %46, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %else__1 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %48 = bitcast i8* %47 to i64* + %49 = load i64, i64* %48, align 4 + %50 = icmp slt i64 %__qsVar4__qubitQidx__, %49 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %else__1 + %51 = phi i1 [ %50, %condTrue__1 ], [ %46, %else__1 ] + br i1 %51, label %then0__2, label %else__2 + +then0__2: ; preds = %condContinue__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 1) + %52 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 0) + %54 = bitcast i8* %53 to 
double* + store double 1.000000e+00, double* %54, align 8 + %55 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %56 = bitcast %Tuple* %55 to { %Array*, %Array* }* + %57 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %56, i32 0, i32 0 + %58 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %56, i32 0, i32 1 + store %Array* %__qsVar0__idxTermType__, %Array** %57, align 8 + store %Array* %52, %Array** %58, align 8 + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %60 = bitcast i8* %59 to i64* + %61 = load i64, i64* %60, align 4 + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %63 = bitcast i8* %62 to i64* + %64 = load i64, i64* %63, align 4 + %65 = sub i64 %64, 1 + %66 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %66, i64 0) + %68 = bitcast i8* %67 to i64* + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %66, i64 1) + %70 = bitcast i8* %69 to i64* + store i64 %61, i64* %68, align 4 + store i64 %65, i64* %70, align 4 + %__qsVar6__termPR1__ = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %56, %Array* %66) + %71 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar6__termPR1__, i32 0, i32 0 + %72 = load { %Array*, %Array* }*, { %Array*, %Array* }** %71, align 8 + %73 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %72, i32 0, i32 0 + %74 = load %Array*, %Array** %73, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %74, i32 1) + %75 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %72, i32 0, i32 1 + %76 = load %Array*, %Array** %75, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 1) + %77 = bitcast { %Array*, %Array* }* %72 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %77, i32 1) + %78 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar6__termPR1__, i32 0, i32 1 + %79 = load %Array*, %Array** %78, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %79, i32 1) + %80 = bitcast { { %Array*, %Array* }*, %Array* }* %__qsVar6__termPR1__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %74, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %76, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %77, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %79, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i32 1) + %81 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %82 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %82, i64 0) + %84 = bitcast i8* %83 to i64* + store i64 %__qsVar4__qubitQidx__, i64* %84, align 4 + %85 = call %Array* @Microsoft__Quantum__Arrays___d4fd6982c609481a8ea49bf9914e223e_Excluding__body(%Array* %82, %Array* %qubits) + call void @__quantum__rt__array_update_reference_count(%Array* %82, i32 -1) + %86 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* null, i32 1) to i64)) + %87 = bitcast %Tuple* %86 to { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* + %88 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %87, i32 0, i32 0 + %89 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %87, i32 0, i32 1 + %90 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %87, i32 0, i32 2 + %91 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %87, i32 0, i32 3 + store { { %Array*, %Array* }*, %Array* }* %__qsVar6__termPR1__, { { %Array*, %Array* }*, %Array* }** %88, align 8 + store double %__qsVar3__angle__, double* %89, align 8 + store %Array* %81, %Array** %90, align 8 + store %Array* %85, %Array** %91, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQTerm___ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %87) + call void @__quantum__rt__array_update_alias_count(%Array* %74, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %77, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %79, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %52, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %55, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %66, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %74, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %76, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %77, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %79, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %74, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %76, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %77, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %79, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %85, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %86, i32 -1) + br label %continue__2 + +else__2: ; preds = %condContinue__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 1) + %92 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %93 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 0) + %94 = bitcast i8* %93 to double* + store double 1.000000e+00, double* %94, align 8 + %95 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %96 = bitcast %Tuple* %95 to { %Array*, %Array* }* + %97 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %96, i32 0, i32 0 + %98 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %96, i32 0, i32 1 + store %Array* %__qsVar0__idxTermType__, %Array** %97, align 8 + store %Array* %92, %Array** %98, align 8 + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %100 = bitcast i8* %99 to i64* + %101 = load i64, i64* %100, align 4 + %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %103 = bitcast i8* %102 to i64* + %104 = load i64, i64* %103, align 4 + %105 = sub i64 %101, %104 + %106 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %107 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %106, i64 0) + %108 = bitcast i8* %107 to i64* + %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %106, i64 1) + %110 = bitcast i8* %109 to i64* + store i64 0, i64* %108, align 4 + store i64 %105, i64* %110, align 4 + %__qsVar7__termPR1__ = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %96, %Array* %106) + %111 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar7__termPR1__, i32 0, i32 0 + %112 = load { %Array*, %Array* }*, { %Array*, %Array* }** %111, align 8 + %113 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %112, i32 0, i32 0 + %114 = load %Array*, %Array** %113, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %114, i32 1) + %115 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %112, i32 0, i32 1 + %116 = load %Array*, %Array** %115, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %116, i32 1) + %117 = bitcast { %Array*, %Array* }* %112 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %117, i32 1) + %118 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar7__termPR1__, i32 0, i32 1 + %119 = load %Array*, %Array** %118, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %119, i32 1) + %120 = bitcast { { %Array*, %Array* }*, %Array* }* %__qsVar7__termPR1__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %120, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %114, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %116, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %117, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %119, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %120, i32 1) + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %__qsVar4__qubitQidx__) + %122 = bitcast i8* %121 to %Qubit** + %123 = load %Qubit*, %Qubit** %122, align 8 + %124 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %124, i64 0) + %126 = bitcast i8* %125 to %Qubit** + store %Qubit* %123, %Qubit** %126, align 
8 + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %128 = bitcast i8* %127 to i64* + %129 = load i64, i64* %128, align 4 + %130 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %131 = bitcast i8* %130 to i64* + %132 = load i64, i64* %131, align 4 + %133 = insertvalue %Range zeroinitializer, i64 %129, 0 + %134 = insertvalue %Range %133, i64 1, 1 + %135 = insertvalue %Range %134, i64 %132, 2 + %136 = call %Array* @__quantum__rt__array_slice_1d(%Array* %qubits, %Range %135, i1 true) + call void @__quantum__rt__array_update_reference_count(%Array* %136, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %136, i32 -1) + %137 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* null, i32 1) to i64)) + %138 = bitcast %Tuple* %137 to { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* + %139 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %138, i32 0, i32 0 + %140 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %138, i32 0, i32 1 + %141 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %138, i32 0, i32 2 + %142 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %138, i32 0, i32 3 + store { { %Array*, %Array* }*, %Array* }* %__qsVar7__termPR1__, { { %Array*, %Array* }*, %Array* }** %139, align 8 + store double %__qsVar3__angle__, double* %140, align 8 + store %Array* %124, %Array** %141, align 8 + store %Array* %136, %Array** %142, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQTerm___ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %138) + call void @__quantum__rt__array_update_alias_count(%Array* %114, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %116, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %117, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %119, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %92, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %95, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %106, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %114, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %116, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %117, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %119, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %114, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %116, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %117, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %119, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %124, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %136, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %137, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__2, %then0__2 + br label %continue__1 + +continue__1: ; preds = %continue__2, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQTerm___ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %0, i32 0, i32 1 
+ %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %0, i32 0, i32 2 + %extraParityQubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %extraParityQubits, i32 1) + %11 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array*, %Array* }* %0, i32 0, i32 3 + %qubits = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %13 = bitcast i8* %12 to double* + %14 = load double, double* %13, align 8 + %15 = fmul double 1.000000e+00, %14 + %__qsVar3__angle__ = fmul double %15, %stepSize + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %17 = bitcast i8* %16 to i64* + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %19 = bitcast i8* %18 to i64* + %20 = load i64, i64* %17, align 4 + %21 = load i64, i64* %19, align 4 + %22 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %22, i64 0) + %24 = bitcast i8* %23 to i64* + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %22, i64 1) + %26 = bitcast i8* %25 to i64* + store i64 %20, i64* %24, align 4 + store i64 %21, i64* %26, align 4 + %__qsVar4__qubitsPQ__ = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %22, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__qubitsPQ__, i32 1) + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %28 = bitcast i8* %27 to i64* + %29 = load i64, i64* %28, align 4 + %30 = add i64 %29, 1 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %32 = bitcast i8* %31 to i64* + %33 = load i64, i64* %32, align 4 + %34 = sub i64 %33, 1 + %35 = insertvalue %Range zeroinitializer, i64 %30, 0 + %36 = insertvalue %Range %35, i64 1, 1 + %37 = insertvalue %Range %36, i64 %34, 2 + %__qsVar5__qubitsJW__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %qubits, %Range %37, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar5__qubitsJW__, i32 1) + %38 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 0) + %40 = bitcast i8* %39 to i2* + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 1) + %42 = bitcast i8* %41 to i2* + store i2 1, i2* %40, align 1 + store i2 1, i2* %42, align 1 + %43 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 0) + %45 = bitcast i8* %44 to i2* + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 1) + %47 = bitcast i8* %46 to i2* + store i2 -1, i2* %45, align 1 + store i2 -1, i2* %47, 
align 1 + %__qsVar6__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 0) + %49 = bitcast i8* %48 to %Array** + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 1) + %51 = bitcast i8* %50 to %Array** + store %Array* %38, %Array** %49, align 8 + store %Array* %43, %Array** %51, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %52 = phi i64 [ 0, %entry ], [ %57, %exiting__1 ] + %53 = icmp sle i64 %52, 1 + br i1 %53, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %52) + %55 = bitcast i8* %54 to %Array** + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %57 = add i64 %52, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 1) + %58 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar5__qubitsJW__) + %59 = call i64 @__quantum__rt__array_get_size_1d(%Array* %extraParityQubits) + %60 = add i64 %58, %59 + %__qsVar7__padding__ = call %Array* @Microsoft__Quantum__Arrays___041f4b97ff464238a7f7da162bd94e8e_ConstantArray__body(i64 %60, i2 -2) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__padding__, i32 1) + %61 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 0) + %63 = bitcast i8* %62 to %Array** + %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 1) + %65 = bitcast i8* %64 to %Array** + store %Array* %43, %Array** %63, align 8 + store %Array* %38, %Array** %65, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %43, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %38, i32 1) + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %66 = phi i64 [ 0, %exit__1 ], [ %76, %exiting__2 ] + %67 = icmp sle i64 %66, 1 + br i1 %67, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 %66) + %69 = bitcast i8* %68 to %Array** + %__qsVar8__op__ = load %Array*, %Array** %69, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_concatenate(%Array* %__qsVar8__op__, %Array* %__qsVar7__padding__) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %70 = call %Array* @__quantum__rt__array_concatenate(%Array* %__qsVar4__qubitsPQ__, %Array* %__qsVar5__qubitsJW__) + call void @__quantum__rt__array_update_reference_count(%Array* %70, i32 1) + %qubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %70, %Array* %extraParityQubits) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + %71 = call 
%Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %72 = bitcast %Tuple* %71 to { %Array*, double, %Array* }* + %73 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %72, i32 0, i32 0 + %74 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %72, i32 0, i32 1 + %75 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %72, i32 0, i32 2 + store %Array* %paulis, %Array** %73, align 8 + store double %__qsVar3__angle__, double* %74, align 8 + store %Array* %qubits__1, %Array** %75, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %72) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %70, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %70, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %71, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %76 = add i64 %66, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %extraParityQubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__qubitsPQ__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar5__qubitsJW__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %77 = phi i64 [ 0, %exit__2 ], [ %82, %exiting__3 ] + %78 = icmp sle i64 %77, 1 + br i1 %78, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %77) + %80 = bitcast i8* %79 to %Array** + %81 = load %Array*, 
%Array** %80, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %82 = add i64 %77, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__padding__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar4__qubitsPQ__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar5__qubitsJW__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %83 = phi i64 [ 0, %exit__3 ], [ %88, %exiting__4 ] + %84 = icmp sle i64 %83, 1 + br i1 %84, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %83) + %86 = bitcast i8* %85 to %Array** + %87 = load %Array*, %Array** %86, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %87, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %88 = add i64 %83, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar7__padding__, i32 -1) + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %89 = phi i64 [ 0, %exit__4 ], [ %94, %exiting__5 ] + %90 = icmp sle i64 %89, 1 + br i1 %90, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 %89) + %92 = bitcast i8* %91 to %Array** + %93 = load %Array*, %Array** %92, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %93, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %94 = add i64 %89, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %61, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerZTerm___body({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %coeff = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %8 = bitcast i8* %7 to double* + %9 = load double, double* %8, align 8 + %10 = fmul double 1.000000e+00, %9 + %angle = fmul double %10, %stepSize + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %12 = bitcast i8* %11 to i64* + %13 = load i64, i64* %12, align 4 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %13) + %15 = bitcast i8* %14 to %Qubit** + %qubit = load %Qubit*, %Qubit** %15, align 8 + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %17 = bitcast i8* %16 to i2* + store i2 -2, i2* %17, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %qubits__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits__1, i64 0) + %19 = bitcast i8* %18 to %Qubit** + store %Qubit* %qubit, %Qubit** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %angle, %Array* %qubits__1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerZTerm___adj({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %8 = bitcast i8* %7 to double* + %9 = load double, double* %8, align 8 + %10 = fmul double 1.000000e+00, %9 + %__qsVar3__angle__ = fmul double %10, %stepSize + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %12 = bitcast i8* %11 to i64* + %13 = load i64, i64* %12, align 4 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %13) + %15 = bitcast i8* %14 to %Qubit** + %__qsVar4__qubit__ = load %Qubit*, %Qubit** %15, align 8 + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %17 = bitcast i8* %16 to i2* + store i2 -2, i2* %17, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %qubits__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits__1, i64 0) + %19 = bitcast i8* %18 to %Qubit** + store %Qubit* %__qsVar4__qubit__, %Qubit** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %__qsVar3__angle__, %Array* %qubits__1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerZTerm___ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* 
%__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %coeff = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %12 = bitcast i8* %11 to double* + %13 = load double, double* %12, align 8 + %14 = fmul double 1.000000e+00, %13 + %angle = fmul double %14, %stepSize + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %16 = bitcast i8* %15 to i64* + %17 = load i64, i64* %16, align 4 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %17) + %19 = bitcast i8* %18 to %Qubit** + %qubit = load %Qubit*, %Qubit** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %21 = bitcast i8* %20 to i2* + store i2 -2, i2* %21, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %qubits__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits__1, i64 0) + %23 = bitcast i8* %22 to %Qubit** + store %Qubit* %qubit, %Qubit** %23, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { %Array*, double, %Array* }* + %26 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %25, i32 0, i32 1 + %28 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %25, i32 0, i32 2 + store %Array* %paulis, %Array** %26, align 8 + store double %angle, double* %27, align 8 + store %Array* %qubits__1, %Array** %28, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %25) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerZTerm___ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %5, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %12 = bitcast i8* %11 to double* + %13 = load double, double* %12, align 8 + %14 = fmul double 1.000000e+00, %13 + %__qsVar3__angle__ = fmul double %14, %stepSize + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %16 = bitcast i8* %15 to i64* + %17 = load i64, i64* %16, align 4 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %17) + %19 = bitcast i8* %18 to %Qubit** + %__qsVar4__qubit__ = load %Qubit*, %Qubit** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %21 = bitcast i8* %20 to i2* + store i2 -2, i2* %21, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %qubits__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits__1, i64 0) + %23 = bitcast i8* %22 to %Qubit** + store %Qubit* %__qsVar4__qubit__, %Qubit** %23, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { %Array*, double, %Array* }* + %26 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %25, i32 0, i32 1 + %28 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %25, i32 0, i32 2 + store %Array* %paulis, %Array** %26, align 8 + store double %__qsVar3__angle__, double* %27, 
align 8 + store %Array* %qubits__1, %Array** %28, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %25) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerZZTerm___body({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %coeff = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %8 = bitcast i8* %7 to double* + %9 = load double, double* %8, align 8 + %10 = fmul double 1.000000e+00, %9 + 
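; Reviewer note, not emitted by the Q# compiler: %9 holds the ZZ term's leading
+ ; coefficient (coeff[0]), and the line below scales it by %stepSize to form the
+ ; rotation angle passed to __quantum__qis__exp__body; the multiply by
+ ; 1.000000e+00 appears to be a benign artifact of the compiler's codegen. +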
%angle = fmul double %10, %stepSize + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %12 = bitcast i8* %11 to i64* + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %14 = bitcast i8* %13 to i64* + %15 = load i64, i64* %12, align 4 + %16 = load i64, i64* %14, align 4 + %17 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 0) + %19 = bitcast i8* %18 to i64* + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 1) + %21 = bitcast i8* %20 to i64* + store i64 %15, i64* %19, align 4 + store i64 %16, i64* %21, align 4 + %qubitsZZ = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %17, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsZZ, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %23 = bitcast i8* %22 to i2* + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %25 = bitcast i8* %24 to i2* + store i2 -2, i2* %23, align 1 + store i2 -2, i2* %25, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsZZ, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %angle, %Array* %qubitsZZ) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsZZ, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsZZ, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsZZ, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerZZTerm___adj({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %3, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %8 = bitcast i8* %7 to double* + %9 = load double, double* %8, align 8 + %10 = fmul double 1.000000e+00, %9 + %__qsVar3__angle__ = fmul double %10, %stepSize + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %12 = bitcast i8* %11 to i64* + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %14 = bitcast i8* %13 to i64* + %15 = load i64, i64* %12, align 4 + %16 = load i64, i64* %14, align 4 + %17 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 0) + %19 = bitcast i8* %18 to i64* + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 1) + %21 = bitcast i8* %20 to i64* + store i64 %15, i64* %19, align 4 + store i64 %16, i64* %21, align 4 + %__qsVar4__qubitsZZ__ = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %17, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__qubitsZZ__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %23 = bitcast i8* %22 to i2* + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %25 = bitcast i8* %24 to i2* + store i2 -2, i2* %23, align 1 + store i2 -2, i2* %25, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__qubitsZZ__, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %__qsVar3__angle__, %Array* %__qsVar4__qubitsZZ__) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__qubitsZZ__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + 
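; Reviewer note, not emitted by the Q# compiler: these paired -1 updates, through
+ ; to ret, mirror the +1 alias-count updates in this adjoint's prologue, so every
+ ; array and tuple it borrowed is returned to its prior alias count on exit. +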
call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__qubitsZZ__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar4__qubitsZZ__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerZZTerm___ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %coeff = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %12 = bitcast i8* %11 to double* + %13 = load double, double* %12, align 8 + %14 = fmul double 1.000000e+00, %13 + %angle = fmul double %14, %stepSize + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %16 = bitcast i8* %15 to i64* + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %18 = bitcast i8* %17 to i64* + %19 = load i64, i64* %16, align 4 + %20 = load i64, i64* %18, align 4 + %21 = call 
%Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 0) + %23 = bitcast i8* %22 to i64* + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 1) + %25 = bitcast i8* %24 to i64* + store i64 %19, i64* %23, align 4 + store i64 %20, i64* %25, align 4 + %qubitsZZ = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %21, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsZZ, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %27 = bitcast i8* %26 to i2* + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %29 = bitcast i8* %28 to i2* + store i2 -2, i2* %27, align 1 + store i2 -2, i2* %29, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsZZ, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsZZ, i32 1) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %31 = bitcast %Tuple* %30 to { %Array*, double, %Array* }* + %32 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %31, i32 0, i32 0 + %33 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %31, i32 0, i32 1 + %34 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %31, i32 0, i32 2 + store %Array* %paulis, %Array** %32, align 8 + store double %angle, double* %33, align 8 + store %Array* %qubitsZZ, %Array** %34, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %31) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitsZZ, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsZZ, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %qubitsZZ, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitsZZ, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerZZTerm___ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %12 = bitcast i8* %11 to double* + %13 = load double, double* %12, align 8 + %14 = fmul double 1.000000e+00, %13 + %__qsVar3__angle__ = fmul double %14, %stepSize + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %16 = bitcast i8* %15 to i64* + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %18 = bitcast i8* %17 to i64* + %19 = load i64, i64* %16, align 4 + %20 = load i64, i64* %18, align 4 + %21 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 0) + %23 = bitcast i8* 
%22 to i64* + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 1) + %25 = bitcast i8* %24 to i64* + store i64 %19, i64* %23, align 4 + store i64 %20, i64* %25, align 4 + %__qsVar4__qubitsZZ__ = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %21, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__qubitsZZ__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %27 = bitcast i8* %26 to i2* + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %29 = bitcast i8* %28 to i2* + store i2 -2, i2* %27, align 1 + store i2 -2, i2* %29, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__qubitsZZ__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar4__qubitsZZ__, i32 1) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %31 = bitcast %Tuple* %30 to { %Array*, double, %Array* }* + %32 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %31, i32 0, i32 0 + %33 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %31, i32 0, i32 1 + %34 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %31, i32 0, i32 2 + store %Array* %paulis, %Array** %32, align 8 + store double %__qsVar3__angle__, double* %33, align 8 + store %Array* %__qsVar4__qubitsZZ__, %Array** %34, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %31) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__qubitsZZ__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar4__qubitsZZ__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar4__qubitsZZ__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar4__qubitsZZ__, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerBitString__body(i64 %nFermions, %Array* %idxFermions) { +entry: + %zString = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions) + %1 = srem i64 %0, 2 + %2 = icmp ne i64 %1, 0 + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([86 x i8], [86 x i8]* @4, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %3) + unreachable + +continue__1: ; preds = %entry + %4 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %nFermions) + %5 = sub i64 %nFermions, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %6 = phi i64 [ 0, %continue__1 ], [ %10, %exiting__1 ] + %7 = icmp sle i64 %6, %5 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %6) + %9 = bitcast i8* %8 to i1* + store i1 false, i1* %9, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %6, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %4, %Array** %zString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions) + %12 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %24, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 %13) + %16 = bitcast i8* %15 to i64* + %fermionIdx = load i64, i64* %16, align 4 + %17 = icmp sge i64 %fermionIdx, %nFermions + br i1 %17, label %then0__2, label %continue__2 + +then0__2: ; preds = %body__2 + %18 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @5, i32 0, i32 0)) + %19 = call %String* @__quantum__rt__int_to_string(i64 %fermionIdx) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + %21 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @6, i32 0, i32 0)) + %22 = call %String* @__quantum__rt__string_concatenate(%String* %20, %String* %21) + call void @__quantum__rt__string_update_reference_count(%String* %20, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %21, i32 -1) + %23 = load %Array*, %Array** %zString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__fail(%String* %22) + unreachable + +continue__2: ; preds = 
%body__2 + br label %header__3 + +exiting__2: ; preds = %exit__3 + %24 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %25 = sub i64 %11, 1 + br label %header__4 + +header__3: ; preds = %exiting__3, %continue__2 + %idx = phi i64 [ 0, %continue__2 ], [ %35, %exiting__3 ] + %26 = icmp sle i64 %idx, %fermionIdx + br i1 %26, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %27 = load %Array*, %Array** %zString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1) + %28 = call %Array* @__quantum__rt__array_copy(%Array* %27, i1 false) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %27, i64 %idx) + %30 = bitcast i8* %29 to i1* + %31 = load i1, i1* %30, align 1 + %32 = xor i1 %31, true + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 %idx) + %34 = bitcast i8* %33 to i1* + store i1 %32, i1* %34, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + store %Array* %28, %Array** %zString, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %idx, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + br label %exiting__2 + +header__4: ; preds = %exiting__4, %exit__2 + %36 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__4 ] + %37 = icmp sle i64 %36, %25 + br i1 %37, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 %36) + %39 = bitcast i8* %38 to i64* + %fermionIdx__1 = load i64, i64* %39, align 4 + %40 = load %Array*, %Array** %zString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %40, i32 -1) + %41 = call %Array* @__quantum__rt__array_copy(%Array* %40, i1 false) + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %41, i64 %fermionIdx__1) + %43 = bitcast i8* %42 to i1* + store i1 false, i1* %43, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1) + store %Array* %41, %Array** %zString, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %40, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %44 = add i64 %36, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + %45 = load %Array*, %Array** %zString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %45, i32 -1) + ret %Array* %45 +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliZString__body(i64 %nFermions, %Array* %idxFermions) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %bitString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerBitString__body(i64 %nFermions, %Array* %idxFermions) + call void @__quantum__rt__array_update_alias_count(%Array* %bitString, i32 1) + %0 = call %Array* @Microsoft__Quantum__Convert__BoolArrayAsPauli__body(i2 -2, i1 true, %Array* %bitString) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bitString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bitString, i32 -1) + ret %Array* %0 +} + +define internal %Range @Microsoft__Quantum__Arrays___f18da7cbe4e940478813d7485ea738db_IndexRange__body(%Array* %array) { +entry: + call 
void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %2 +} + +define internal %Array* @Microsoft__Quantum__Convert__BoolArrayAsPauli__body(i2 %pauli, i1 %bitApply, %Array* %bits) { +entry: + %paulis = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %nBits = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %nBits) + %1 = sub i64 %nBits, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2) + %5 = bitcast i8* %4 to i2* + store i2 0, i2* %5, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %0, %Array** %paulis, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 1) + %7 = sub i64 %nBits, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idxBit = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %8 = icmp sle i64 %idxBit, %7 + br i1 %8, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %9 = load %Array*, %Array** %paulis, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 -1) + %10 = call %Array* @__quantum__rt__array_copy(%Array* %9, i1 false) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bits, i64 %idxBit) + %12 = bitcast i8* %11 to i1* + %13 = load i1, i1* %12, align 1 + %14 = icmp eq i1 %13, %bitApply + %15 = select i1 %14, i2 %pauli, i2 0 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %idxBit) + %17 = bitcast i8* %16 to i2* + store i2 %15, i2* %17, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + store %Array* %10, %Array** %paulis, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %18 = add i64 %idxBit, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %19 = load %Array*, %Array** %paulis, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 -1) + ret %Array* %19 +} + +define internal i64 @Microsoft__Quantum__Math__Min__body(%Array* %values) { +entry: + %min = alloca i64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 0) + %1 = bitcast i8* %0 to i64* + %2 = load i64, i64* %1, align 4 + store i64 %2, i64* %min, align 4 + %nTerms = call i64 @__quantum__rt__array_get_size_1d(%Array* %values) + %3 = sub i64 %nTerms, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %4 = icmp sle i64 %idx, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %idx) + %6 = bitcast i8* %5 to i64* + %7 = load i64, i64* %6, align 4 + %8 = 
load i64, i64* %min, align 4 + %9 = icmp slt i64 %7, %8 + br i1 %9, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %idx) + %11 = bitcast i8* %10 to i64* + %12 = load i64, i64* %11, align 4 + store i64 %12, i64* %min, align 4 + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %13 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %14 = load i64, i64* %min, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 -1) + ret i64 %14 +} + +define internal i64 @Microsoft__Quantum__Math__Max__body(%Array* %values) { +entry: + %max = alloca i64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 0) + %1 = bitcast i8* %0 to i64* + %2 = load i64, i64* %1, align 4 + store i64 %2, i64* %max, align 4 + %nTerms = call i64 @__quantum__rt__array_get_size_1d(%Array* %values) + %3 = sub i64 %nTerms, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %4 = icmp sle i64 %idx, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %idx) + %6 = bitcast i8* %5 to i64* + %7 = load i64, i64* %6, align 4 + %8 = load i64, i64* %max, align 4 + %9 = icmp sgt i64 %7, %8 + br i1 %9, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %idx) + %11 = bitcast i8* %10 to i64* + %12 = load i64, i64* %11, align 4 + store i64 %12, i64* %max, align 4 + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %13 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %14 = load i64, i64* %max, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 -1) + ret i64 %14 +} + +define internal %Range @Microsoft__Quantum__Arrays___700b015a14454be98b56de747498937e_IndexRange__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { %Array*, %Array* }** + %6 = load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* 
%array, i32 1) + %13 = sub i64 %0, 1 + %14 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %13, 2 + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %26, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %16) + %19 = bitcast i8* %18 to { %Array*, %Array* }** + %20 = load { %Array*, %Array* }*, { %Array*, %Array* }** %19, align 8 + %21 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 0 + %22 = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %22, i32 -1) + %23 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 1 + %24 = load %Array*, %Array** %23, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %24, i32 -1) + %25 = bitcast { %Array*, %Array* }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %14 +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermToGenIdx__body({ %Array*, %Array* }* %term, %Array* %termType) { +entry: + %0 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %term, i32 0, i32 0 + %idxFermions = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %term, i32 0, i32 1 + %coeff = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %2 = bitcast { %Array*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %termType, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %coeff, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, %Array* }* + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + store %Array* %termType, %Array** %5, align 8 + store %Array* %coeff, %Array** %6, align 8 + %7 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %4, %Array* %idxFermions) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %termType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %7 +} + +define internal %Range @Microsoft__Quantum__Arrays___1282ba485eb84ebd9e61ed357fc1aebb_IndexRange__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { { %Array*, %Array* }*, %Array* }** + %6 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %6, i32 0, i32 0 + %8 = load { %Array*, %Array* }*, { %Array*, %Array* }** %7, align 8 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + %12 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = bitcast { %Array*, %Array* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %13, i32 1) + %14 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %6, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %16 = bitcast { { %Array*, %Array* }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %18 = sub i64 %0, 1 + %19 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %18, 2 + %20 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, %exit__1 ], [ %36, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %21) + %24 = bitcast i8* %23 to { { %Array*, %Array* }*, %Array* }** + %25 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %24, align 8 + %26 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %25, i32 0, i32 0 + %27 = load { %Array*, %Array* }*, { %Array*, %Array* }** %26, align 8 + %28 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %27, i32 0, i32 0 + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 -1) + %30 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %27, i32 0, i32 1 + %31 = load %Array*, %Array** %30, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %31, i32 -1) + %32 
= bitcast { %Array*, %Array* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %32, i32 -1) + %33 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %25, i32 0, i32 1 + %34 = load %Array*, %Array** %33, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + %35 = bitcast { { %Array*, %Array* }*, %Array* }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %35, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %36 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %19 +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 %__Item1__, %Callable* %__Item2__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item2__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item2__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { i64, %Callable* }* + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %1, i32 0, i32 1 + store i64 %__Item1__, i64* %2, align 4 + store %Callable* %__Item2__, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item2__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item2__, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item2__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item2__, i32 -1) + ret { i64, %Callable* }* %1 +} + +define internal %Callable* @Microsoft__Quantum__Arrays___afda34fbc524426087f3337291e18d1f_LookupFunction__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { { %Array*, %Array* }*, %Array* }** + %6 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %6, i32 0, i32 0 + %8 = load { %Array*, %Array* }*, { %Array*, %Array* }** %7, align 8 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + %12 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = bitcast { %Array*, %Array* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %13, i32 1) + %14 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %6, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %16 = bitcast { { %Array*, %Array* }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %18 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Arrays___b20df4913ab0459888bcf1448be084b3_ElementAt__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %19 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %35, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %20) + %23 = bitcast i8* %22 to { { %Array*, %Array* }*, %Array* }** + %24 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %23, align 8 + %25 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %24, i32 0, i32 0 + %26 = load { %Array*, %Array* }*, { %Array*, %Array* }** %25, align 8 + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %26, i32 0, i32 0 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 1) + %29 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %26, i32 0, i32 1 + %30 = load %Array*, %Array** %29, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %30, i32 1) + %31 = bitcast { %Array*, %Array* }* %26 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 1) + %32 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %24, i32 0, i32 1 + %33 = load %Array*, %Array** %32, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %33, i32 1) + %34 = bitcast { { %Array*, %Array* }*, %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %35 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %array, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { %Callable*, %Array* }* + %38 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %37, i32 0, i32 1 + store %Callable* %18, %Callable** %38, align 8 + store %Array* %array, %Array** %39, align 8 + %40 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__52__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__28__FunctionTable, %Tuple* %36) + %41 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %42 = phi i64 [ 0, %exit__2 ], [ %57, %exiting__3 ] + %43 = icmp sle i64 %42, %41 + br i1 %43, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%array, i64 %42) + %45 = bitcast i8* %44 to { { %Array*, %Array* }*, %Array* }** + %46 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %45, align 8 + %47 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %46, i32 0, i32 0 + %48 = load { %Array*, %Array* }*, { %Array*, %Array* }** %47, align 8 + %49 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %48, i32 0, i32 0 + %50 = load %Array*, %Array** %49, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 -1) + %51 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %48, i32 0, i32 1 + %52 = load %Array*, %Array** %51, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %52, i32 -1) + %53 = bitcast { %Array*, %Array* }* %48 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %53, i32 -1) + %54 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %46, i32 0, i32 1 + %55 = load %Array*, %Array** %54, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %55, i32 -1) + %56 = bitcast { { %Array*, %Array* }*, %Array* }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %57 = add i64 %42, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Callable* %40 +} + +define internal { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerClusterOperatorEvolutionSet__body() { +entry: + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorFunction____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call { %Callable* }* @Microsoft__Quantum__Simulation__EvolutionSet__body(%Callable* %0) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + ret { %Callable* }* %1 +} + +define internal { %Callable* }* @Microsoft__Quantum__Simulation__EvolutionSet__body(%Callable* %__Item1__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable* }* + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + store %Callable* %__Item1__, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 -1) + ret { %Callable* }* %1 +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorFunction____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { %Array*, %Array* }*, %Array* }* + %1 = call { %Callable* }* 
@Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___JordanWignerClusterOperatorFunction____body({ { %Array*, %Array* }*, %Array* }* %0) + %2 = bitcast %Tuple* %result-tuple to { { %Callable* }* }* + %3 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %2, i32 0, i32 0 + store { %Callable* }* %1, { %Callable* }** %3, align 8 + ret void +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerClusterOperatorGeneratorSystem__body(%Array* %data) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %data) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %2) + %5 = bitcast i8* %4 to { { double, double }*, %Array* }** + %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0 + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + %14 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0__JordanWignerStateAsGeneratorIndex____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %16) + %19 = bitcast i8* %18 to { { double, double }*, %Array* }** + %20 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %19, align 8 + %21 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %20, i32 0, i32 0 + %22 = load { double, double }*, { double, double }** %21, align 8 + %23 = bitcast { double, double }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 1) + %24 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %20, i32 0, i32 1 + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 1) + %26 = bitcast { { double, double }*, %Array* }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void 
@__quantum__rt__array_update_reference_count(%Array* %data, i32 1) + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { %Callable*, %Array* }* + %30 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %29, i32 0, i32 1 + store %Callable* %14, %Callable** %30, align 8 + store %Array* %data, %Array** %31, align 8 + %32 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__7__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__5__FunctionTable, %Tuple* %28) + %33 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 %0, %Callable* %32) + %34 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %46, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %35) + %38 = bitcast i8* %37 to { { double, double }*, %Array* }** + %39 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %38, align 8 + %40 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %39, i32 0, i32 0 + %41 = load { double, double }*, { double, double }** %40, align 8 + %42 = bitcast { double, double }* %41 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 -1) + %43 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %39, i32 0, i32 1 + %44 = load %Array*, %Array** %43, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 -1) + %45 = bitcast { { double, double }*, %Array* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %46 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %32, i32 -1) + ret { i64, %Callable* }* %33 +} + +define internal void @Lifted__PartialApplication__7__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { i64 }* + %4 = getelementptr inbounds { i64 }, { i64 }* %3, i32 0, i32 0 + %5 = load i64, i64* %4, align 4 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, i64 }* getelementptr ({ %Array*, i64 }, { %Array*, i64 }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, i64 }* + %8 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store i64 %5, i64* %9, align 4 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load 
%Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0__JordanWignerStateAsGeneratorIndex____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, i64 }* + %1 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load i64, i64* %2, align 4 + %5 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0__JordanWignerStateAsGeneratorIndex____body(%Array* %3, i64 %4) + %6 = bitcast %Tuple* %result-tuple to { { { %Array*, %Array* }*, %Array* }* }* + %7 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %6, i32 0, i32 0 + store { { %Array*, %Array* }*, %Array* }* %5, { { %Array*, %Array* }*, %Array* }** %7, align 8 + ret void +} + +define internal void @MemoryManagement__5__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { { double, double }*, %Array* }** + %11 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 0 + %13 = load { double, double }*, { double, double }** %12, align 8 + %14 = bitcast { double, double }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change) + %15 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 1 + %16 = load %Array*, %Array** %15, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 %count-change) + %17 = bitcast { { double, double }*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__5__AliasCount(%Tuple* %capture-tuple, i32 
%count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { { double, double }*, %Array* }** + %11 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 0 + %13 = load { double, double }*, { double, double }** %12, align 8 + %14 = bitcast { double, double }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change) + %15 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 1 + %16 = load %Array*, %Array** %15, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %16, i32 %count-change) + %17 = bitcast { { double, double }*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionEvolutionSet__body() { +entry: + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionFunction__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call { %Callable* }* @Microsoft__Quantum__Simulation__EvolutionSet__body(%Callable* %0) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + ret { %Callable* }* %1 +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionFunction__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { %Array*, %Array* }*, %Array* }* + %1 = call { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionFunction__body({ { %Array*, %Array* }*, %Array* }* %0) + %2 = bitcast %Tuple* %result-tuple to { { %Callable* }* }* + %3 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %2, i32 0, i32 0 + store { %Callable* }* %1, { %Callable* }** %3, align 8 + ret void +} + +define internal { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionFunction__body({ { %Array*, 
%Array* }*, %Array* }* %generatorIndex) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %9 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionImpl__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %13 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %12, i32 0, i32 1 + store %Callable* %10, %Callable** %13, align 8 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %14, align 8 + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__8__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__3__FunctionTable, %Tuple* %11) + %16 = call { %Callable* }* @Microsoft__Quantum__Simulation__EvolutionUnitary__body(%Callable* %15) + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret { %Callable* }* %16 +} + +define internal void 
@Lifted__PartialApplication__8__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %2 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %4 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 0 + %5 = load double, double* %4, align 8 + %6 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %2, { { %Array*, %Array* }*, %Array* }** %10, align 8 + store double %5, double* %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %2 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %4 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 0 + %5 = load double, double* %4, align 8 + %6 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 0 + %11 = 
getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %2, { { %Array*, %Array* }*, %Array* }** %10, align 8 + store double %5, double* %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 1 + %7 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %6, align 8 + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %9 = load double, double* %8, align 8 + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %7, { { %Array*, %Array* }*, %Array* }** %14, align 8 + store double %9, double* %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 
ptrtoint ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* getelementptr ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 1 + %7 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %6, align 8 + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %9 = load double, double* %8, align 8 + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { { %Array*, 
%Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %7, { { %Array*, %Array* }*, %Array* }** %14, align 8 + store double %9, double* %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* getelementptr ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %23) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionImpl__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionImpl__body({ { %Array*, %Array* }*, %Array* }* %4, double %5, %Array* %6) + ret void +} + +define internal void 
@Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionImpl__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionImpl__adj({ { %Array*, %Array* }*, %Array* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionImpl__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Array*, %Array* }*, %Array* }*, double, %Array* }*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionImpl__ctl(%Array* %3, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionImpl__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Array*, %Array* }*, %Array* }*, double, %Array* }*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionImpl__ctladj(%Array* %3, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionImpl__body({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** 
%0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %idxDoubles = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxTermType, i64 0) + %8 = bitcast i8* %7 to i64* + %termType = load i64, i64* %8, align 4 + %9 = icmp eq i64 %termType, 0 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerZTerm___body({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +test1__1: ; preds = %entry + %10 = icmp eq i64 %termType, 1 + br i1 %10, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerZZTerm___body({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %11 = icmp eq i64 %termType, 2 + br i1 %11, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQandPQQRTerm___body({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %12 = icmp eq i64 %termType, 3 + br i1 %12, label %then3__1, label %continue__1 + +then3__1: ; preds = %test3__1 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWigner0123Term_____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +continue__1: ; preds = %then3__1, %test3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* 
%idxDoubles, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionImpl__adj({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %__qsVar1__idxDoubles__ = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__idxTermType__, i64 0) + %8 = bitcast i8* %7 to i64* + %__qsVar3__termType__ = load i64, i64* %8, align 4 + %9 = icmp eq i64 %__qsVar3__termType__, 0 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerZTerm___adj({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +test1__1: ; preds = %entry + %10 = icmp eq i64 %__qsVar3__termType__, 1 + br i1 %10, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerZZTerm___adj({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %11 = icmp eq i64 %__qsVar3__termType__, 2 + br i1 %11, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQandPQQRTerm___adj({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %12 = icmp eq i64 %__qsVar3__termType__, 3 + br i1 %12, label %then3__1, label %continue__1 + +then3__1: ; preds = %test3__1 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWigner0123Term_____adj({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +continue__1: ; preds = 
%then3__1, %test3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionImpl__ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %idxDoubles = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxTermType, i64 0) + %12 = bitcast i8* %11 to i64* + %termType = load i64, i64* %12, align 4 + %13 = icmp eq i64 %termType, 0 + br 
i1 %13, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %16, align 8 + store double %stepSize, double* %17, align 8 + store %Array* %qubits, %Array** %18, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerZTerm___ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %19 = icmp eq i64 %termType, 1 + br i1 %19, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %22 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 
0, i32 0 + %23 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %22, align 8 + store double %stepSize, double* %23, align 8 + store %Array* %qubits, %Array** %24, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerZZTerm___ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %25 = icmp eq i64 %termType, 2 + br i1 %25, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %28 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %27, i32 0, i32 1 + %30 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %27, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %28, align 8 + store double %stepSize, double* %29, align 8 + store %Array* %qubits, %Array** %30, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQandPQQRTerm___ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %27) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 -1) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %31 = icmp eq i64 %termType, 3 + br i1 %31, label %then3__1, label %continue__1 + +then3__1: ; preds = %test3__1 + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %34 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %33, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %34, align 8 + store double %stepSize, double* %35, align 8 + store %Array* %qubits, %Array** %36, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWigner0123Term_____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %33) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then3__1, %test3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) 
+ call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerFermionImpl__ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__idxDoubles__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__idxTermType__, i64 0) + %12 = bitcast i8* %11 to i64* + %__qsVar3__termType__ = load i64, i64* %12, align 4 + %13 = icmp eq i64 %__qsVar3__termType__, 0 + br i1 %13, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %14 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %16, align 8 + store double %stepSize, double* %17, align 8 + store %Array* %qubits, %Array** %18, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerZTerm___ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %19 = icmp eq i64 %__qsVar3__termType__, 1 + br i1 %19, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %22 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %22, align 8 + store double %stepSize, double* %23, align 
8 + store %Array* %qubits, %Array** %24, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerZZTerm___ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %25 = icmp eq i64 %__qsVar3__termType__, 2 + br i1 %25, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %28 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %27, i32 0, i32 1 + %30 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %27, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %28, align 8 + store double %stepSize, double* %29, align 8 + store %Array* %qubits, %Array** %30, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner___ApplyJordanWignerPQandPQQRTerm___ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %27) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %31 = icmp eq i64 %__qsVar3__termType__, 3 + br i1 %31, label %then3__1, label 
%continue__1 + +then3__1: ; preds = %test3__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %34 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %33, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %34, align 8 + store double %stepSize, double* %35, align 8 + store %Array* %qubits, %Array** %36, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___ApplyJordanWigner0123Term_____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %33) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then3__1, %test3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + ret void +} + +define internal { i64, %Callable* }* 
@Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerGeneratorSystem__body({ %Array*, %Array*, %Array*, %Array* }* %data) { +entry: + %0 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 0 + %ZData = load %Array*, %Array** %0, align 8 + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ZData) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %3) + %6 = bitcast i8* %5 to { %Array*, %Array* }** + %7 = load { %Array*, %Array* }*, { %Array*, %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { %Array*, %Array* }* %7 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 1) + %14 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 1 + %ZZData = load %Array*, %Array** %14, align 8 + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ZZData) + %16 = sub i64 %15, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %17 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %17) + %20 = bitcast i8* %19 to { %Array*, %Array* }** + %21 = load { %Array*, %Array* }*, { %Array*, %Array* }** %20, align 8 + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 0 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %24 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 1 + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 1) + %26 = bitcast { %Array*, %Array* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %17, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 1) + %28 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 2 + %PQandPQQRData = load %Array*, %Array** %28, align 8 + %29 = call i64 @__quantum__rt__array_get_size_1d(%Array* %PQandPQQRData) + %30 = sub i64 %29, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %31 = phi i64 [ 0, %exit__2 ], [ %41, %exiting__3 ] + %32 = icmp sle i64 %31, %30 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = 
call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %31) + %34 = bitcast i8* %33 to { %Array*, %Array* }** + %35 = load { %Array*, %Array* }*, { %Array*, %Array* }** %34, align 8 + %36 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %35, i32 0, i32 0 + %37 = load %Array*, %Array** %36, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %37, i32 1) + %38 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %35, i32 0, i32 1 + %39 = load %Array*, %Array** %38, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %39, i32 1) + %40 = bitcast { %Array*, %Array* }* %35 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %41 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %PQandPQQRData, i32 1) + %42 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 3 + %h0123Data = load %Array*, %Array** %42, align 8 + %43 = call i64 @__quantum__rt__array_get_size_1d(%Array* %h0123Data) + %44 = sub i64 %43, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %45 = phi i64 [ 0, %exit__3 ], [ %55, %exiting__4 ] + %46 = icmp sle i64 %45, %44 + br i1 %46, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %45) + %48 = bitcast i8* %47 to { %Array*, %Array* }** + %49 = load { %Array*, %Array* }*, { %Array*, %Array* }** %48, align 8 + %50 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %49, i32 0, i32 0 + %51 = load %Array*, %Array** %50, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %51, i32 1) + %52 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %49, i32 0, i32 1 + %53 = load %Array*, %Array** %52, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %53, i32 1) + %54 = bitcast { %Array*, %Array* }* %49 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %54, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %55 = add i64 %45, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 1) + %56 = bitcast { %Array*, %Array*, %Array*, %Array* }* %data to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 1) + %57 = sub i64 %1, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %58 = phi i64 [ 0, %exit__4 ], [ %68, %exiting__5 ] + %59 = icmp sle i64 %58, %57 + br i1 %59, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %58) + %61 = bitcast i8* %60 to { %Array*, %Array* }** + %62 = load { %Array*, %Array* }*, { %Array*, %Array* }** %61, align 8 + %63 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %62, i32 0, i32 0 + %64 = load %Array*, %Array** %63, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + %65 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %62, i32 0, i32 1 + %66 = load %Array*, %Array** %65, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 1) + %67 = bitcast { %Array*, %Array* }* %62 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %67, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %68 = add i64 %58, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 1) + %69 = sub i64 %15, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %70 = phi i64 [ 0, %exit__5 ], [ %80, %exiting__6 ] + %71 = icmp sle i64 %70, %69 + br i1 %71, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %70) + %73 = bitcast i8* %72 to { %Array*, %Array* }** + %74 = load { %Array*, %Array* }*, { %Array*, %Array* }** %73, align 8 + %75 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %74, i32 0, i32 0 + %76 = load %Array*, %Array** %75, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 1) + %77 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %74, i32 0, i32 1 + %78 = load %Array*, %Array** %77, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %78, i32 1) + %79 = bitcast { %Array*, %Array* }* %74 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %80 = add i64 %70, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 1) + %81 = sub i64 %29, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %82 = phi i64 [ 0, %exit__6 ], [ %92, %exiting__7 ] + %83 = icmp sle i64 %82, %81 + br i1 %83, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %82) + %85 = bitcast i8* %84 to { %Array*, %Array* }** + %86 = load { %Array*, %Array* }*, { %Array*, %Array* }** %85, align 8 + %87 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %86, i32 0, i32 0 + %88 = load %Array*, %Array** %87, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %88, i32 1) + %89 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %86, i32 0, i32 1 + %90 = load %Array*, %Array** %89, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %90, i32 1) + %91 = bitcast { %Array*, %Array* }* %86 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %91, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %92 = add i64 %82, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %PQandPQQRData, i32 1) + %93 = sub i64 %43, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %94 = phi i64 [ 0, %exit__7 ], [ %104, %exiting__8 ] + %95 = icmp sle i64 %94, %93 + br i1 %95, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %94) + %97 = bitcast i8* %96 to { %Array*, %Array* }** + %98 = load { %Array*, %Array* }*, { %Array*, %Array* }** %97, align 8 + %99 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %98, i32 0, i32 0 + %100 = load %Array*, %Array** %99, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %100, i32 1) + %101 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %98, i32 0, i32 1 + %102 = load %Array*, %Array** %101, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %102, i32 1) + %103 = bitcast { %Array*, %Array* }* %98 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %103, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %104 = add i64 %94, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 1) + %105 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 0) + %107 = bitcast i8* %106 to i64* + store i64 0, i64* %107, align 4 + %ZGenSys = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %ZData, %Array* %105) + %108 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %ZGenSys, i32 0, i32 1 + %109 = load %Callable*, %Callable** %108, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %109, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %109, i32 1) + %110 = bitcast { i64, %Callable* }* %ZGenSys to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %110, i32 1) + %111 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %111, i64 0) + %113 = bitcast i8* %112 to i64* + store i64 1, i64* %113, align 4 + %ZZGenSys = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %ZZData, %Array* %111) + %114 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %ZZGenSys, i32 0, i32 1 + %115 = load %Callable*, %Callable** %114, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %115, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %115, i32 1) + %116 = bitcast { i64, %Callable* }* %ZZGenSys to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %116, i32 1) + %117 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %117, i64 0) + %119 = bitcast i8* %118 to i64* + store i64 2, i64* %119, align 4 + %PQandPQQRGenSys = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %PQandPQQRData, %Array* %117) + %120 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %PQandPQQRGenSys, i32 0, i32 1 + %121 = load %Callable*, %Callable** %120, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %121, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %121, i32 1) + %122 = bitcast { i64, %Callable* }* %PQandPQQRGenSys to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %122, i32 1) + %123 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %124 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %123, i64 0) + %125 = bitcast i8* %124 to i64* + store i64 3, i64* %125, align 4 + %h0123GenSys = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %h0123Data, %Array* %123) + %126 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %h0123GenSys, i32 0, i32 1 + %127 = load %Callable*, %Callable** %126, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %127, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %127, i32 1) + %128 = bitcast { i64, %Callable* }* %h0123GenSys to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %128, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %109, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %109, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %110, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %115, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %115, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %116, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %121, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %121, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %122, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %127, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %127, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %128, i32 1) + %129 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %130 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 0) + %131 = bitcast i8* %130 to { i64, %Callable* }** + %132 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 1) + %133 = bitcast i8* %132 to { i64, %Callable* }** + %134 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 2) + %135 = bitcast i8* %134 to { i64, %Callable* }** + %136 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 3) + %137 = bitcast i8* %136 to { i64, %Callable* }** + store { i64, %Callable* }* %ZGenSys, { i64, %Callable* }** %131, align 8 + store { i64, %Callable* }* %ZZGenSys, { i64, %Callable* }** %133, align 8 + store { i64, %Callable* }* %PQandPQQRGenSys, { i64, %Callable* }** %135, align 8 + store { i64, %Callable* }* %h0123GenSys, { i64, %Callable* }** %137, align 8 + %138 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__SumGeneratorSystems__body(%Array* %129) + %139 = sub i64 %1, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %140 = phi i64 [ 0, %exit__8 ], [ %150, %exiting__9 ] + %141 = icmp sle i64 %140, %139 + br i1 %141, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %140) + %143 = bitcast i8* %142 to { %Array*, %Array* }** + %144 = load { %Array*, %Array* }*, { %Array*, %Array* }** %143, align 8 + %145 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %144, i32 0, i32 0 + %146 = load %Array*, %Array** %145, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %146, i32 -1) + %147 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %144, i32 0, i32 1 + %148 = load %Array*, %Array** %147, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %148, i32 -1) + %149 = bitcast { %Array*, %Array* }* %144 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %149, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %150 = add i64 %140, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 -1) + %151 = sub i64 %15, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %152 = phi i64 [ 0, %exit__9 ], [ %162, %exiting__10 ] + %153 = icmp sle i64 %152, %151 + br i1 %153, label %body__10, label %exit__10 + +body__10: ; preds = 
%header__10 + %154 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %152) + %155 = bitcast i8* %154 to { %Array*, %Array* }** + %156 = load { %Array*, %Array* }*, { %Array*, %Array* }** %155, align 8 + %157 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %156, i32 0, i32 0 + %158 = load %Array*, %Array** %157, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %158, i32 -1) + %159 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %156, i32 0, i32 1 + %160 = load %Array*, %Array** %159, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %160, i32 -1) + %161 = bitcast { %Array*, %Array* }* %156 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %161, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %162 = add i64 %152, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 -1) + %163 = sub i64 %29, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %164 = phi i64 [ 0, %exit__10 ], [ %174, %exiting__11 ] + %165 = icmp sle i64 %164, %163 + br i1 %165, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %166 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %164) + %167 = bitcast i8* %166 to { %Array*, %Array* }** + %168 = load { %Array*, %Array* }*, { %Array*, %Array* }** %167, align 8 + %169 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %168, i32 0, i32 0 + %170 = load %Array*, %Array** %169, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %170, i32 -1) + %171 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %168, i32 0, i32 1 + %172 = load %Array*, %Array** %171, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %172, i32 -1) + %173 = bitcast { %Array*, %Array* }* %168 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %173, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %174 = add i64 %164, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %PQandPQQRData, i32 -1) + %175 = sub i64 %43, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %176 = phi i64 [ 0, %exit__11 ], [ %186, %exiting__12 ] + %177 = icmp sle i64 %176, %175 + br i1 %177, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %178 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %176) + %179 = bitcast i8* %178 to { %Array*, %Array* }** + %180 = load { %Array*, %Array* }*, { %Array*, %Array* }** %179, align 8 + %181 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %180, i32 0, i32 0 + %182 = load %Array*, %Array** %181, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %182, i32 -1) + %183 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %180, i32 0, i32 1 + %184 = load %Array*, %Array** %183, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %184, i32 -1) + %185 = bitcast { %Array*, %Array* }* %180 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %185, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %186 = add i64 %176, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 -1) 
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 -1) + %187 = sub i64 %1, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %188 = phi i64 [ 0, %exit__12 ], [ %198, %exiting__13 ] + %189 = icmp sle i64 %188, %187 + br i1 %189, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %190 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %188) + %191 = bitcast i8* %190 to { %Array*, %Array* }** + %192 = load { %Array*, %Array* }*, { %Array*, %Array* }** %191, align 8 + %193 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %192, i32 0, i32 0 + %194 = load %Array*, %Array** %193, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %194, i32 -1) + %195 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %192, i32 0, i32 1 + %196 = load %Array*, %Array** %195, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %196, i32 -1) + %197 = bitcast { %Array*, %Array* }* %192 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %197, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %198 = add i64 %188, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 -1) + %199 = sub i64 %15, 1 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %200 = phi i64 [ 0, %exit__13 ], [ %210, %exiting__14 ] + %201 = icmp sle i64 %200, %199 + br i1 %201, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %202 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %200) + %203 = bitcast i8* %202 to { %Array*, %Array* }** + %204 = load { %Array*, %Array* }*, { %Array*, %Array* }** %203, align 8 + %205 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %204, i32 0, i32 0 + %206 = load %Array*, %Array** %205, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %206, i32 -1) + %207 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %204, i32 0, i32 1 + %208 = load %Array*, %Array** %207, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %208, i32 -1) + %209 = bitcast { %Array*, %Array* }* %204 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %209, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %210 = add i64 %200, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 -1) + %211 = sub i64 %29, 1 + br label %header__15 + +header__15: ; preds = %exiting__15, %exit__14 + %212 = phi i64 [ 0, %exit__14 ], [ %222, %exiting__15 ] + %213 = icmp sle i64 %212, %211 + br i1 %213, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %212) + %215 = bitcast i8* %214 to { %Array*, %Array* }** + %216 = load { %Array*, %Array* }*, { %Array*, %Array* }** %215, align 8 + %217 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %216, i32 0, i32 0 + %218 = load %Array*, %Array** %217, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %218, i32 -1) + %219 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %216, i32 0, i32 1 + %220 = load %Array*, %Array** %219, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %220, i32 -1) + %221 = bitcast { %Array*, %Array* 
}* %216 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %221, i32 -1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %222 = add i64 %212, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_alias_count(%Array* %PQandPQQRData, i32 -1) + %223 = sub i64 %43, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %224 = phi i64 [ 0, %exit__15 ], [ %234, %exiting__16 ] + %225 = icmp sle i64 %224, %223 + br i1 %225, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %226 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %224) + %227 = bitcast i8* %226 to { %Array*, %Array* }** + %228 = load { %Array*, %Array* }*, { %Array*, %Array* }** %227, align 8 + %229 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %228, i32 0, i32 0 + %230 = load %Array*, %Array** %229, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %230, i32 -1) + %231 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %228, i32 0, i32 1 + %232 = load %Array*, %Array** %231, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %232, i32 -1) + %233 = bitcast { %Array*, %Array* }* %228 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %233, i32 -1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %234 = add i64 %224, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %109, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %109, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %110, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %115, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %115, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %116, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %121, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %121, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %122, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %127, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %127, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %128, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %105, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %109, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %109, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %110, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %111, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %115, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %115, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %116, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %117, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %121, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %121, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %122, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %123, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %127, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %127, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %128, i32 -1) + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %235 = phi i64 [ 0, %exit__16 ], [ %243, %exiting__17 ] + %236 = icmp sle i64 %235, 3 + br i1 %236, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %237 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 %235) + %238 = bitcast i8* %237 to { i64, %Callable* }** + %239 = load { i64, %Callable* }*, { i64, %Callable* }** %238, align 8 + %240 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %239, i32 0, i32 1 + %241 = load %Callable*, %Callable** %240, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %241, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %241, i32 -1) + %242 = bitcast { i64, %Callable* }* %239 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %242, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %243 = add i64 %235, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_reference_count(%Array* %129, i32 -1) + ret { i64, %Callable* }* %138 +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %data, %Array* %termType) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %data) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %2) + %5 = bitcast i8* %4 to { %Array*, %Array* }** + %6 = load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 1) + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__HTermsToGenIdx__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %14 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %25, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %15) + %18 = bitcast i8* %17 to { %Array*, %Array* }** + %19 = load { %Array*, 
%Array* }*, { %Array*, %Array* }** %18, align 8 + %20 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %19, i32 0, i32 0 + %21 = load %Array*, %Array** %20, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 1) + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %19, i32 0, i32 1 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 1) + %24 = bitcast { %Array*, %Array* }* %19 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %25 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %data, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %termType, i32 1) + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Array* }* getelementptr ({ %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Callable*, %Array*, %Array* }* + %28 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %27, i32 0, i32 1 + %30 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %27, i32 0, i32 2 + store %Callable* %13, %Callable** %28, align 8 + store %Array* %data, %Array** %29, align 8 + store %Array* %termType, %Array** %30, align 8 + %31 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__13__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__9__FunctionTable, %Tuple* %26) + %32 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 %0, %Callable* %31) + %33 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %34 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %35 = icmp sle i64 %34, %33 + br i1 %35, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %34) + %37 = bitcast i8* %36 to { %Array*, %Array* }** + %38 = load { %Array*, %Array* }*, { %Array*, %Array* }** %37, align 8 + %39 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %38, i32 0, i32 0 + %40 = load %Array*, %Array** %39, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %40, i32 -1) + %41 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %38, i32 0, i32 1 + %42 = load %Array*, %Array** %41, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 -1) + %43 = bitcast { %Array*, %Array* }* %38 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %34, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + ret { i64, %Callable* }* %32 +} + +define internal { i64, %Callable* }* 
@Microsoft__Quantum__Simulation__SumGeneratorSystems__body(%Array* %generatorSystems) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %generatorSystems) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %generatorSystems, i64 %2) + %5 = bitcast i8* %4 to { i64, %Callable* }** + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %5, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { i64, %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %generatorSystems, i32 1) + %11 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation__AddGeneratorSystems__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %12 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__IdentityGeneratorSystem__body() + %13 = call { i64, %Callable* }* @Microsoft__Quantum__Arrays___a0f596cedd8444258509c1c2bf5316bc_Fold__body(%Callable* %11, { i64, %Callable* }* %12, %Array* %generatorSystems) + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 1 + %15 = load %Callable*, %Callable** %14, align 8 + %16 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %17 = phi i64 [ 0, %exit__1 ], [ %25, %exiting__2 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %generatorSystems, i64 %17) + %20 = bitcast i8* %19 to { i64, %Callable* }** + %21 = load { i64, %Callable* }*, { i64, %Callable* }** %20, align 8 + %22 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %21, i32 0, i32 1 + %23 = load %Callable*, %Callable** %22, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %23, i32 -1) + %24 = bitcast { i64, %Callable* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %25 = add i64 %17, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %generatorSystems, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + %26 = bitcast { i64, %Callable* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + ret { i64, %Callable* }* %13 +} + +define internal void 
@Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__body(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call %Range @Microsoft__Quantum__Arrays___a8e1efd738ea4e35a379366d158d6002_IndexRange__body(%Array* %register) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %4 = icmp sgt i64 %2, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxQubit = phi i64 [ %1, %preheader__1 ], [ %14, %exiting__1 ] + %5 = icmp sle i64 %idxQubit, %3 + %6 = icmp sge i64 %idxQubit, %3 + %7 = select i1 %4, i1 %5, i1 %6 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Qubit* }* + %13 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %12, i32 0, i32 0 + store %Qubit* %10, %Qubit** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %singleElementOperation, %Tuple* %11, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %14 = add i64 %idxQubit, %2 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void 
@Microsoft__Quantum__Intrinsic__X__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__adj(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call %Range @Microsoft__Quantum__Arrays___a8e1efd738ea4e35a379366d158d6002_IndexRange__body(%Array* %register) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + %4 = sub i64 %3, %1 + %5 = sdiv i64 %4, %2 + %6 = mul i64 %2, %5 + %7 = add i64 %1, %6 + %8 = sub i64 0, %2 + %9 = insertvalue %Range zeroinitializer, i64 %7, 0 + %10 = insertvalue %Range %9, i64 %8, 1 + %11 = insertvalue %Range %10, i64 %1, 2 + %12 = extractvalue %Range %11, 0 + %13 = extractvalue %Range %11, 1 + %14 = extractvalue %Range %11, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %15 = icmp sgt i64 %13, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %12, %preheader__1 ], [ %26, %exiting__1 ] + %16 = icmp sle i64 %__qsVar0__idxQubit__, %14 + %17 = icmp sge i64 %__qsVar0__idxQubit__, %14 + %18 = select i1 %15, i1 %16, i1 %17 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %19) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %21 = bitcast i8* %20 to %Qubit** + %22 = load %Qubit*, %Qubit** %21, align 8 + %23 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Qubit* }* + %25 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %24, i32 0, i32 0 + store %Qubit* %22, %Qubit** %25, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %19, %Tuple* %23, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %26 = add i64 %__qsVar0__idxQubit__, %13 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %3 = call %Range @Microsoft__Quantum__Arrays___a8e1efd738ea4e35a379366d158d6002_IndexRange__body(%Array* %register) + %4 = extractvalue %Range %3, 0 + %5 = extractvalue %Range %3, 1 + %6 = extractvalue %Range %3, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %7 = icmp sgt i64 %5, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxQubit = phi i64 [ %4, %preheader__1 ], [ %19, %exiting__1 ] + %8 = icmp sle i64 %idxQubit, %6 + %9 = icmp sge i64 %idxQubit, %6 + %10 = select i1 %7, i1 %8, i1 %9 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, %Qubit* }* + %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %16, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %17, align 8 + store %Qubit* %14, %Qubit** %18, align 8 + 
call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %15, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %idxQubit, %5 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %3 = call %Range @Microsoft__Quantum__Arrays___a8e1efd738ea4e35a379366d158d6002_IndexRange__body(%Array* %register) + %4 = extractvalue %Range %3, 0 + %5 = extractvalue %Range %3, 1 + %6 = extractvalue %Range %3, 2 + %7 = sub i64 %6, %4 + %8 = sdiv i64 %7, %5 + %9 = mul i64 %5, %8 + %10 = add i64 %4, %9 + %11 = sub i64 0, %5 + %12 = insertvalue %Range zeroinitializer, i64 %10, 0 + %13 = insertvalue %Range %12, i64 %11, 1 + %14 = insertvalue %Range %13, i64 %4, 2 + %15 = extractvalue %Range %14, 0 + %16 = extractvalue %Range %14, 1 + %17 = extractvalue %Range %14, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %18 = icmp sgt i64 %16, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %15, %preheader__1 ], [ %30, %exiting__1 ] + %19 = icmp sle i64 %__qsVar0__idxQubit__, %17 + %20 = icmp sge i64 %__qsVar0__idxQubit__, %17 + %21 = select i1 %18, i1 %19, i1 %20 + br i1 %21, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %24 = bitcast i8* %23 to %Qubit** + %25 = load %Qubit*, %Qubit** %24, align 8 + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, 
%Qubit* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Array*, %Qubit* }* + %28 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %27, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %28, align 8 + store %Qubit* %25, %Qubit** %29, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %26, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %30 = add i64 %__qsVar0__idxQubit__, %16 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSparseMultiConfigurationalState__body(%Callable* %initialStatePreparation, %Array* %excitations, %Array* %qubits) { +entry: + %success = alloca i1, align 1 + %applyFlips = alloca %Array*, align 8 + %coefficientsNewComplexPolar = alloca %Array*, align 8 + %coefficientsSqrtAbs = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %initialStatePreparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %initialStatePreparation, i32 1) + %nExcitations = call i64 @__quantum__rt__array_get_size_1d(%Array* %excitations) + %0 = sub i64 %nExcitations, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %excitations, i64 %1) + %4 = bitcast i8* %3 to { { double, double }*, %Array* }** + %5 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %4, align 8 + %6 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %5, i32 0, i32 0 + %7 = load { double, double }*, { double, double }** %6, align 8 + %8 = bitcast { double, double }* %7 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %5, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { { double, double }*, %Array* }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %excitations, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %13 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nExcitations) + %14 = sub i64 
%nExcitations, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 %15) + %18 = bitcast i8* %17 to double* + store double 0.000000e+00, double* %18, align 8 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + store %Array* %13, %Array** %coefficientsSqrtAbs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %20 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double 0.000000e+00, double 0.000000e+00) + %21 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nExcitations) + %22 = sub i64 %nExcitations, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %23 = phi i64 [ 0, %exit__2 ], [ %28, %exiting__3 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %23) + %26 = bitcast i8* %25 to { double, double }** + store { double, double }* %20, { double, double }** %26, align 8 + %27 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %28 = add i64 %23, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %21, %Array** %coefficientsNewComplexPolar, align 8 + %29 = sub i64 %nExcitations, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %30 = phi i64 [ 0, %exit__3 ], [ %36, %exiting__4 ] + %31 = icmp sle i64 %30, %29 + br i1 %31, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %30) + %33 = bitcast i8* %32 to { double, double }** + %34 = load { double, double }*, { double, double }** %33, align 8 + %35 = bitcast { double, double }* %34 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %35, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %36 = add i64 %30, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 1) + %37 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %38 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nExcitations) + %39 = sub i64 %nExcitations, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %40 = phi i64 [ 0, %exit__4 ], [ %44, %exiting__5 ] + %41 = icmp sle i64 %40, %39 + br i1 %41, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 %40) + %43 = bitcast i8* %42 to %Array** + store %Array* %37, %Array** %43, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %44 = add i64 %40, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + store %Array* %38, %Array** %applyFlips, align 8 + %45 = sub i64 %nExcitations, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %46 = phi i64 [ 0, %exit__5 ], [ %51, %exiting__6 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %48 
= call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 %46) + %49 = bitcast i8* %48 to %Array** + %50 = load %Array*, %Array** %49, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %51 = add i64 %46, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %38, i32 1) + %52 = sub i64 %nExcitations, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %idx = phi i64 [ 0, %exit__6 ], [ %94, %exiting__7 ] + %53 = icmp sle i64 %idx, %52 + br i1 %53, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %excitations, i64 %idx) + %55 = bitcast i8* %54 to { { double, double }*, %Array* }** + %56 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %55, align 8 + %57 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %56, i32 0, i32 0 + %x = load { double, double }*, { double, double }** %57, align 8 + %58 = bitcast { double, double }* %x to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %58, i32 1) + %59 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %56, i32 0, i32 1 + %excitation = load %Array*, %Array** %59, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %excitation, i32 1) + %60 = load %Array*, %Array** %coefficientsSqrtAbs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %60, i32 -1) + %61 = call %Array* @__quantum__rt__array_copy(%Array* %60, i1 false) + %62 = getelementptr inbounds { double, double }, { double, double }* %x, i32 0, i32 0 + %63 = getelementptr inbounds { double, double }, { double, double }* %x, i32 0, i32 1 + %64 = load double, double* %62, align 8 + %65 = load double, double* %63, align 8 + %66 = call { double, double }* @Microsoft__Quantum__Math__Complex__body(double %64, double %65) + %67 = call { double, double }* @Microsoft__Quantum__Math__ComplexAsComplexPolar__body({ double, double }* %66) + %d = call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %67) + %68 = call double @__quantum__qis__sqrt__body(double %d) + %69 = bitcast { double, double }* %66 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %69, i32 -1) + %70 = bitcast { double, double }* %67 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %70, i32 -1) + %71 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 %idx) + %72 = bitcast i8* %71 to double* + store double %68, double* %72, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %61, i32 1) + store %Array* %61, %Array** %coefficientsSqrtAbs, align 8 + %73 = load %Array*, %Array** %coefficientsNewComplexPolar, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %73, i32 -1) + %74 = call %Array* @__quantum__rt__array_copy(%Array* %73, i1 false) + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 %idx) + %76 = bitcast i8* %75 to double* + %77 = load double, double* %76, align 8 + %78 = call { double, double }* @Microsoft__Quantum__Math__Complex__body(double %64, double %65) + %79 = call { double, double }* @Microsoft__Quantum__Math__ComplexAsComplexPolar__body({ double, double }* %78) + %80 = call double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* 
%79) + %81 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %77, double %80) + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 %idx) + %83 = bitcast i8* %82 to { double, double }** + %84 = bitcast { double, double }* %81 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %84, i32 1) + %85 = load { double, double }*, { double, double }** %83, align 8 + %86 = bitcast { double, double }* %85 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %86, i32 -1) + store { double, double }* %81, { double, double }** %83, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %74, i32 1) + store %Array* %74, %Array** %coefficientsNewComplexPolar, align 8 + %87 = load %Array*, %Array** %applyFlips, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %87, i32 -1) + %88 = call %Array* @__quantum__rt__array_copy(%Array* %87, i1 false) + %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 %idx) + %90 = bitcast i8* %89 to %Array** + call void @__quantum__rt__array_update_alias_count(%Array* %excitation, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %excitation, i32 1) + %91 = load %Array*, %Array** %90, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %91, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %91, i32 -1) + store %Array* %excitation, %Array** %90, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %88, i32 1) + store %Array* %88, %Array** %applyFlips, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %58, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %excitation, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %73, i32 -1) + %92 = bitcast { double, double }* %78 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %92, i32 -1) + %93 = bitcast { double, double }* %79 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %93, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %87, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %94 = add i64 %idx, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + %95 = sitofp i64 %nExcitations to double + %96 = call double @Microsoft__Quantum__Math__Lg__body(double %95) + %nBitsIndices = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %96) + br label %repeat__1 + +repeat__1: ; preds = %fixup__1, %exit__7 + store i1 false, i1* %success, align 1 + %97 = add i64 %nBitsIndices, 1 + %auxillary = call %Array* @__quantum__rt__qubit_allocate_array(i64 %97) + call void @__quantum__rt__array_update_alias_count(%Array* %auxillary, i32 1) + %flag = call %Qubit* @__quantum__rt__qubit_allocate() + %98 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___PrepareSingleConfigurationalStateSingleSiteOccupation____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %99 = load %Array*, %Array** %applyFlips, align 8 + %100 = call %Array* @Microsoft__Quantum__Arrays___2f6981dc34504975878034851493f61b_Mapped__body(%Callable* %98, %Array* %99) + %101 = call %Callable* 
@Microsoft__Quantum__Arrays___5f8f6915612f46e7aec067f52ac7aceb_LookupFunction__body(%Array* %100) + %multiplexer = call %Callable* @Microsoft__Quantum__Canon__MultiplexerBruteForceFromGenerator__body(i64 %nExcitations, %Callable* %101) + call void @__quantum__rt__capture_update_alias_count(%Callable* %multiplexer, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %multiplexer, i32 1) + %102 = load %Array*, %Array** %coefficientsNewComplexPolar, align 8 + %103 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %auxillary) + call void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__body(%Array* %102, { %Array* }* %103) + %104 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %auxillary) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %105 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %106 = bitcast %Tuple* %105 to { { %Array* }*, %Array* }* + %107 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %106, i32 0, i32 0 + %108 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %106, i32 0, i32 1 + store { %Array* }* %104, { %Array* }** %107, align 8 + store %Array* %qubits, %Array** %108, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %multiplexer, %Tuple* %105, %Tuple* null) + %109 = load %Array*, %Array** %coefficientsSqrtAbs, align 8 + %110 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %auxillary) + call void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__adj(%Array* %109, { %Array* }* %110) + %111 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %112 = call %Callable* @Microsoft__Quantum__Canon___5677538337b642fcacbb1c21675a13c0_ControlledOnInt__body(i64 0, %Callable* %111) + call void @__quantum__rt__array_update_reference_count(%Array* %auxillary, i32 1) + %113 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %114 = bitcast %Tuple* %113 to { %Array*, %Qubit* }* + %115 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %114, i32 0, i32 0 + %116 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %114, i32 0, i32 1 + store %Array* %auxillary, %Array** %115, align 8 + store %Qubit* %flag, %Qubit** %116, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %112, %Tuple* %113, %Tuple* null) + %outcome = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %flag) + %117 = call %Result* @__quantum__rt__result_get_one() + %118 = call i1 @__quantum__rt__result_equal(%Result* %outcome, %Result* %117) + store i1 %118, i1* %success, align 1 + call void @Microsoft__Quantum__Intrinsic__ResetAll__body(%Array* %auxillary) + call void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %flag) + %119 = getelementptr inbounds { %Array* }, { %Array* }* %103, i32 0, i32 0 + %120 = load %Array*, %Array** %119, align 8 + %121 = getelementptr inbounds { %Array* }, { %Array* }* %104, i32 0, i32 0 + %122 = load %Array*, %Array** %121, align 8 + %123 = getelementptr inbounds { %Array* }, { %Array* }* %110, i32 0, i32 0 + %124 = load %Array*, 
%Array** %123, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %multiplexer, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %multiplexer, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %98, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %98, i32 -1) + %125 = call i64 @__quantum__rt__array_get_size_1d(%Array* %100) + %126 = sub i64 %125, 1 + br label %header__8 + +until__1: ; preds = %exit__8 + br i1 %118, label %rend__1, label %fixup__1 + +fixup__1: ; preds = %until__1 + call void @Microsoft__Quantum__Intrinsic__ResetAll__body(%Array* %qubits) + br label %repeat__1 + +rend__1: ; preds = %until__1 + %127 = load %Array*, %Array** %coefficientsSqrtAbs, align 8 + %128 = load %Array*, %Array** %coefficientsNewComplexPolar, align 8 + %129 = load %Array*, %Array** %applyFlips, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %initialStatePreparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %initialStatePreparation, i32 -1) + %130 = sub i64 %nExcitations, 1 + br label %header__9 + +header__8: ; preds = %exiting__8, %repeat__1 + %131 = phi i64 [ 0, %repeat__1 ], [ %136, %exiting__8 ] + %132 = icmp sle i64 %131, %126 + br i1 %132, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %100, i64 %131) + %134 = bitcast i8* %133 to %Callable** + %135 = load %Callable*, %Callable** %134, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %135, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %135, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %136 = add i64 %131, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %100, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %101, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %101, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %multiplexer, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %multiplexer, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + %137 = bitcast { %Array* }* %103 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %137, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %122, i32 -1) + %138 = bitcast { %Array* }* %104 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %138, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %105, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %124, i32 -1) + %139 = bitcast { %Array* }* %110 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %139, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %111, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %111, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %112, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %112, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxillary, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %113, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %outcome, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %flag) + call void @__quantum__rt__array_update_alias_count(%Array* %auxillary, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %auxillary) + br label %until__1 + +header__9: ; preds = %exiting__9, %rend__1 + %140 = phi i64 [ 0, %rend__1 ], [ %151, %exiting__9 ] + %141 = icmp sle i64 %140, %130 + br i1 %141, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %excitations, i64 %140) + %143 = bitcast i8* %142 to { { double, double }*, %Array* }** + %144 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %143, align 8 + %145 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %144, i32 0, i32 0 + %146 = load { double, double }*, { double, double }** %145, align 8 + %147 = bitcast { double, double }* %146 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %147, i32 -1) + %148 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %144, i32 0, i32 1 + %149 = load %Array*, %Array** %148, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %149, i32 -1) + %150 = bitcast { { double, double }*, %Array* }* %144 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %150, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %151 = add i64 %140, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %excitations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %127, i32 -1) + %152 = call i64 @__quantum__rt__array_get_size_1d(%Array* %128) + %153 = sub i64 %152, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %154 = phi i64 [ 0, %exit__9 ], [ %160, %exiting__10 ] + %155 = icmp sle i64 %154, %153 + br i1 %155, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %156 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %128, i64 %154) + %157 = bitcast i8* %156 to { double, double }** + %158 = load { double, double }*, { double, double }** %157, align 8 + %159 = bitcast { double, double }* %158 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %159, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %160 = add i64 %154, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %128, i32 -1) + %161 = call i64 @__quantum__rt__array_get_size_1d(%Array* %129) + %162 = sub i64 %161, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %163 = phi i64 [ 0, %exit__10 ], [ %168, %exiting__11 ] + %164 = icmp sle i64 %163, %162 + br i1 %164, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %165 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 %163) + %166 = bitcast i8* %165 to %Array** + %167 = load %Array*, %Array** %166, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %167, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %168 = add i64 %163, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void 
@__quantum__rt__array_update_alias_count(%Array* %129, i32 -1) + %169 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %169, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %127, i32 -1) + %170 = sub i64 %152, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %171 = phi i64 [ 0, %exit__11 ], [ %177, %exiting__12 ] + %172 = icmp sle i64 %171, %170 + br i1 %172, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %173 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %128, i64 %171) + %174 = bitcast i8* %173 to { double, double }** + %175 = load { double, double }*, { double, double }** %174, align 8 + %176 = bitcast { double, double }* %175 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %176, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %177 = add i64 %171, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_reference_count(%Array* %128, i32 -1) + %178 = sub i64 %161, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %179 = phi i64 [ 0, %exit__12 ], [ %184, %exiting__13 ] + %180 = icmp sle i64 %179, %178 + br i1 %180, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %181 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 %179) + %182 = bitcast i8* %181 to %Array** + %183 = load %Array*, %Array** %182, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %183, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %184 = add i64 %179, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_reference_count(%Array* %129, i32 -1) + ret void +} + +define internal { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %Magnitude, double %Argument) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { double, double }* + %2 = getelementptr inbounds { double, double }, { double, double }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { double, double }, { double, double }* %1, i32 0, i32 1 + store double %Magnitude, double* %2, align 8 + store double %Argument, double* %3, align 8 + ret { double, double }* %1 +} + +define internal double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 0 + %2 = load double, double* %1, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %2 +} + +define internal { double, double }* @Microsoft__Quantum__Math__ComplexAsComplexPolar__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = call double @Microsoft__Quantum__Math__AbsComplex__body({ double, double }* %input) + %2 = call double @Microsoft__Quantum__Math__ArgComplex__body({ double, double }* %input) + %3 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double 
%1, double %2) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret { double, double }* %3 +} + +define internal { double, double }* @Microsoft__Quantum__Math__Complex__body(double %Real, double %Imag) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { double, double }* + %2 = getelementptr inbounds { double, double }, { double, double }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { double, double }, { double, double }* %1, i32 0, i32 1 + store double %Real, double* %2, align 8 + store double %Imag, double* %3, align 8 + ret { double, double }* %1 +} + +declare double @__quantum__qis__sqrt__body(double) + +define internal double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 1 + %2 = load double, double* %1, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %2 +} + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +declare void @__quantum__rt__qubit_release(%Qubit*) + +define internal %Callable* @Microsoft__Quantum__Canon__MultiplexerBruteForceFromGenerator__body(i64 %0, %Callable* %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %unitaryGenerator = bitcast %Tuple* %2 to { i64, %Callable* }* + %3 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %4 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + store i64 %0, i64* %3, align 4 + store %Callable* %1, %Callable** %4, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %5 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___dae13232a52742a7a1f5472999176521_MultiplexOperationsBruteForceFromGenerator__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { i64, %Callable* }* }* getelementptr ({ %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Callable*, { i64, %Callable* }* }* + %8 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %7, i32 0, i32 1 + store %Callable* %5, %Callable** %8, align 8 + store { i64, %Callable* }* %unitaryGenerator, { i64, %Callable* }** %9, align 8 + %10 = call %Callable* 
@__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__26__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__12__FunctionTable, %Tuple* %6) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret %Callable* %10 +} + +define internal %Callable* @Microsoft__Quantum__Arrays___5f8f6915612f46e7aec067f52ac7aceb_LookupFunction__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Arrays___7d83b54afca94675b63617b69b56aa7a_ElementAt__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %9 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %15, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %10) + %13 = bitcast i8* %12 to %Callable** + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %15 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %array, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Callable*, %Array* }* + %18 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 1 + store %Callable* %8, %Callable** %18, align 8 + store %Array* %array, %Array** %19, align 8 + %20 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__51__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__27__FunctionTable, %Tuple* %16) + %21 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %22 = phi i64 [ 0, %exit__2 ], [ %27, %exiting__3 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %24 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %22) + %25 = bitcast i8* %24 to %Callable** + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %27 = add i64 %22, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Callable* %20 +} + +define internal %Array* @Microsoft__Quantum__Arrays___2f6981dc34504975878034851493f61b_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to %Array** + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %7 = icmp eq i64 %length, 0 + br i1 %7, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %9 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %11 = bitcast i8* %10 to %Array** + %12 = load %Array*, %Array** %11, align 8 + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Array* }* + %15 = getelementptr inbounds { %Array* }, { %Array* }* %14, i32 0, i32 0 + store %Array* %12, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %13, %Tuple* %16) + %17 = bitcast %Tuple* %16 to { %Callable* }* + %18 = getelementptr inbounds { %Callable* }, { %Callable* }* %17, i32 0, i32 0 + %first = load %Callable*, %Callable** %18, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %first, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %first, i32 1) + %19 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %20 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %21 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__2 ] + %22 = icmp sle i64 %21, %9 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %21) + %24 = bitcast 
i8* %23 to %Array** + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %8 + +header__3: ; preds = %exiting__3, %continue__1 + %27 = phi i64 [ 0, %continue__1 ], [ %31, %exiting__3 ] + %28 = icmp sle i64 %27, %20 + br i1 %28, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 %27) + %30 = bitcast i8* %29 to %Callable** + store %Callable* %first, %Callable** %30, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %first, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %first, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %31 = add i64 %27, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %19, %Array** %retval, align 8 + %32 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %33 = phi i64 [ 0, %exit__3 ], [ %38, %exiting__4 ] + %34 = icmp sle i64 %33, %32 + br i1 %34, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 %33) + %36 = bitcast i8* %35 to %Callable** + %37 = load %Callable*, %Callable** %36, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %37, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %37, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %38 = add i64 %33, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 1) + %39 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idx = phi i64 [ 1, %exit__4 ], [ %56, %exiting__5 ] + %40 = icmp sle i64 %idx, %39 + br i1 %40, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %41 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 -1) + %42 = call %Array* @__quantum__rt__array_copy(%Array* %41, i1 false) + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %44 = bitcast i8* %43 to %Array** + %45 = load %Array*, %Array** %44, align 8 + %46 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %47 = bitcast %Tuple* %46 to { %Array* }* + %48 = getelementptr inbounds { %Array* }, { %Array* }* %47, i32 0, i32 0 + store %Array* %45, %Array** %48, align 8 + %49 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %46, %Tuple* %49) + %50 = bitcast %Tuple* %49 to { %Callable* }* + %51 = getelementptr inbounds { %Callable* }, { %Callable* }* %50, i32 0, i32 0 + %52 = load %Callable*, %Callable** %51, align 8 + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %42, i64 %idx) + %54 = bitcast i8* %53 to %Callable** + call void @__quantum__rt__capture_update_alias_count(%Callable* %52, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %52, i32 1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %52, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %52, i32 1) + %55 = load %Callable*, %Callable** %54, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %55, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %55, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %55, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %55, i32 -1) + store %Callable* %52, %Callable** %54, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 1) + store %Array* %42, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %46, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %52, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %52, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %49, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %56 = add i64 %idx, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %57 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %58 = sub i64 %length, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %59 = phi i64 [ 0, %exit__5 ], [ %64, %exiting__6 ] + %60 = icmp sle i64 %59, %58 + br i1 %60, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %59) + %62 = bitcast i8* %61 to %Array** + %63 = load %Array*, %Array** %62, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %63, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %64 = add i64 %59, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %first, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %first, i32 -1) + %65 = call i64 @__quantum__rt__array_get_size_1d(%Array* %57) + %66 = sub i64 %65, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %67 = phi i64 [ 0, %exit__6 ], [ %72, %exiting__7 ] + %68 = icmp sle i64 %67, %66 + br i1 %68, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %57, i64 %67) + %70 = bitcast i8* %69 to %Callable** + %71 = load %Callable*, %Callable** %70, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %71, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %71, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %72 = add i64 %67, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %57, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %first, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %first, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret %Array* %57 +} + +define internal void 
@Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___PrepareSingleConfigurationalStateSingleSiteOccupation____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array* }* + %1 = getelementptr inbounds { %Array* }, { %Array* }* %0, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Callable* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___PrepareSingleConfigurationalStateSingleSiteOccupation____body(%Array* %2) + %4 = bitcast %Tuple* %result-tuple to { %Callable* }* + %5 = getelementptr inbounds { %Callable* }, { %Callable* }* %4, i32 0, i32 0 + store %Callable* %3, %Callable** %5, align 8 + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__body(%Array* %coefficients, { %Array* }* %qubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__body(double 0.000000e+00, %Array* %coefficients, { %Array* }* %qubits) + %12 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + ret void +} + +define internal { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %__Item1__) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Array* }* + %2 = getelementptr inbounds { 
%Array* }, { %Array* }* %1, i32 0, i32 0 + store %Array* %__Item1__, %Array** %2, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %__Item1__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__Item1__, i32 -1) + ret { %Array* }* %1 +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__adj(%Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__adj(double 0.000000e+00, %Array* %coefficients, { %Array* }* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___5677538337b642fcacbb1c21675a13c0_ControlledOnInt__body(i64 %numberState, %Callable* %oracle) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___6234a07443684162b5952eda90654bb6_ApplyControlledOnInt__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Callable* }* getelementptr ({ %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, i64, %Callable* }* + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store i64 %numberState, i64* %4, align 4 + store %Callable* %oracle, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__40__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__17__FunctionTable, %Tuple* %1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + ret %Callable* %6 +} + +define internal %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { +entry: + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 -2, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + 
%2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %3 = bitcast i8* %2 to %Qubit** + store %Qubit* %qubit, %Qubit** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %4 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + ret %Result* %4 +} + +declare %Result* @__quantum__rt__result_get_one() + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +define internal void @Microsoft__Quantum__Intrinsic__ResetAll__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %2) + %5 = bitcast i8* %4 to %Qubit** + %qubit = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %qubit) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %qubit) { +entry: + %0 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) + %1 = call %Result* @__quantum__rt__result_get_one() + %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %qubit) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) + +define internal i1 @Microsoft__Quantum__Arrays___0fcd31919d144fe58f058d4e79e5219d_IsEmpty__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { { double, double }*, %Array* }** + %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0 + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %14 = icmp eq i64 %0, 0 + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %16) + %19 = bitcast i8* %18 to { { double, double }*, %Array* }** + %20 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %19, align 8 + %21 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %20, i32 0, i32 0 + %22 = load { double, double }*, { double, double }** %21, align 8 + %23 = bitcast { double, double }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %23, i32 -1) + %24 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %20, i32 0, i32 1 + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + %26 = bitcast { { double, double }*, %Array* }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret i1 %14 +} + +define internal void @Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array* }* + %1 = getelementptr inbounds { %Array* }, { %Array* }* %0, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + call void @Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__body(%Array* %2) + ret void +} + +define internal void @Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array* }* + %1 = getelementptr inbounds { %Array* }, { %Array* }* %0, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + call void @Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__adj(%Array* %2) + ret void +} + +define internal void @Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__ctl(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* 
%result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__ctladj(%Array* %3, %Array* %4) + ret void +} + +define internal void @Lifted__PartialApplication__9__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Array* }* + %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Array* }* + %7 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %6, i32 0, i32 1 + store i64 %2, i64* %7, align 4 + store %Array* %4, %Array** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Array* }* + %10 = getelementptr inbounds { %Array* }, { %Array* }* %9, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Array* }*, %Array* }* getelementptr ({ { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { i64, %Array* }*, %Array* }* + %14 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %13, i32 0, i32 1 + store { i64, %Array* }* %6, { i64, %Array* }** %14, align 8 + store %Array* %11, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @MemoryManagement__6__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Array* }* + %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + 
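+; The loop below propagates %count-change into every element of the captured
+; %Array*: each element is a ({ double, double }*, %Array*) tuple, so the inner
+; complex-coefficient tuple, the inner index array, and the element tuple itself
+; all have their reference counts adjusted together with the outer array.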
+body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { { double, double }*, %Array* }** + %11 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 0 + %13 = load { double, double }*, { double, double }** %12, align 8 + %14 = bitcast { double, double }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change) + %15 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 1 + %16 = load %Array*, %Array** %15, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 %count-change) + %17 = bitcast { { double, double }*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__6__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Array* }* + %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { { double, double }*, %Array* }** + %11 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 0 + %13 = load { double, double }*, { double, double }** %12, align 8 + %14 = bitcast { double, double }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change) + %15 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 1 + %16 = load %Array*, %Array** %15, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %16, i32 %count-change) + %17 = bitcast { { double, double }*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 
%count-change) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareUnitaryCoupledClusterState__body(%Callable* %initialStatePreparation, %Array* %clusterOperator, double %trotterStepSize, %Array* %qubits) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %initialStatePreparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %initialStatePreparation, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %clusterOperator) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %clusterOperator, i64 %2) + %5 = bitcast i8* %4 to { { double, double }*, %Array* }** + %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0 + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %clusterOperator, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %clusterOperatorGeneratorSystem = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerClusterOperatorGeneratorSystem__body(%Array* %clusterOperator) + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %clusterOperatorGeneratorSystem, i32 0, i32 1 + %15 = load %Callable*, %Callable** %14, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %15, i32 1) + %16 = bitcast { i64, %Callable* }* %clusterOperatorGeneratorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + %17 = call { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerClusterOperatorEvolutionSet__body() + %evolutionGenerator = call { { %Callable* }*, { i64, %Callable* }* }* @Microsoft__Quantum__Simulation__EvolutionGenerator__body({ %Callable* }* %17, { i64, %Callable* }* %clusterOperatorGeneratorSystem) + %18 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %19 = load { %Callable* }*, { %Callable* }** %18, align 8 + %20 = getelementptr inbounds { %Callable* }, { %Callable* }* %19, i32 0, i32 0 + %21 = load %Callable*, %Callable** %20, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %21, i32 1) + %22 = bitcast { %Callable* }* %19 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 1) + %23 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %24 = load { i64, %Callable* }*, { i64, %Callable* }** %23, align 8 + %25 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 1 + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 1) + %27 = bitcast { i64, %Callable* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 1) + %28 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + %29 = call { %Callable* }* @Microsoft__Quantum__Simulation__TrotterSimulationAlgorithm__body(double %trotterStepSize, i64 1) + %30 = getelementptr inbounds { %Callable* }, { %Callable* }* %29, i32 0, i32 0 + %simulationAlgorithm = load %Callable*, %Callable** %30, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %simulationAlgorithm, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %simulationAlgorithm, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %simulationAlgorithm, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %simulationAlgorithm, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 1) + %31 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* getelementptr ({ %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* null, i32 1) to i64)) + %32 = bitcast %Tuple* %31 to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %33 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %32, i32 0, i32 0 + %34 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %32, i32 0, i32 1 + %35 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %32, i32 0, i32 2 + store %Callable* %simulationAlgorithm, %Callable** %33, align 8 + store double 1.000000e+00, double* %34, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, { { %Callable* }*, { i64, %Callable* }* }** %35, align 8 + %oracle = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__10__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__7__FunctionTable, %Tuple* %31) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { %Array* }* + %38 = getelementptr inbounds { %Array* }, { %Array* }* %37, i32 0, i32 0 + store %Array* %qubits, %Array** %38, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %initialStatePreparation, %Tuple* %36, %Tuple* null) + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Array* }* + %41 = getelementptr inbounds { %Array* }, { %Array* }* %40, i32 0, i32 0 + store %Array* %qubits, %Array** %41, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %oracle, %Tuple* %39, %Tuple* null) + %42 = getelementptr inbounds { %Callable* }, { %Callable* }* %17, i32 0, i32 0 + %43 = load %Callable*, %Callable** %42, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %initialStatePreparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %initialStatePreparation, i32 -1) + %44 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %45 = phi i64 [ 0, %exit__1 ], [ %56, %exiting__2 ] + %46 = icmp sle i64 %45, %44 + br i1 %46, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %clusterOperator, i64 %45) + %48 = bitcast i8* %47 to { { double, double }*, %Array* }** + %49 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %48, align 8 + %50 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %49, i32 0, i32 0 + %51 = load { double, double }*, { double, double }** %50, align 8 + %52 = bitcast { double, double }* %51 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %52, i32 -1) + %53 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %49, i32 0, i32 1 + %54 = load %Array*, %Array** %53, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %54, i32 -1) + %55 = bitcast { { double, double }*, %Array* }* %49 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %55, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %56 = add i64 %45, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %clusterOperator, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %15, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %21, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1) 
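+; Epilogue: release the alias counts still held on the simulation algorithm and
+; the partially applied oracle, then drop the reference counts acquired above
+; while building the generator system, evolution set, and argument tuples.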
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %simulationAlgorithm, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %simulationAlgorithm, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %43, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %43, i32 -1) + %57 = bitcast { %Callable* }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %57, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %simulationAlgorithm, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %simulationAlgorithm, i32 -1) + %58 = bitcast { %Callable* }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %58, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %39, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__body(%Array* %input) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__adj(%Array* %input) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__ctl(%Array* %__controlQubits__, %Array* %input) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___69173d15e6dc43acae43a0cef86fdeba_NoOp__ctladj(%Array* %__controlQubits__, %Array* %input) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 -1) + ret void +} + +define internal { { %Callable* }*, { i64, %Callable* }* }* @Microsoft__Quantum__Simulation__EvolutionGenerator__body({ %Callable* }* %__Item1__, { i64, %Callable* }* %__Item2__) { +entry: + %0 = getelementptr inbounds { %Callable* }, { %Callable* }* %__Item1__, i32 0, i32 0 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { %Callable* }* %__Item1__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__Item2__, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + %5 = bitcast { i64, %Callable* }* %__Item2__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }*, { i64, %Callable* }* }* getelementptr ({ { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { %Callable* }*, { i64, %Callable* }* }* + %8 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %7, i32 0, i32 1 + store { %Callable* }* %__Item1__, { %Callable* }** %8, align 8 + store { i64, %Callable* }* %__Item2__, { i64, %Callable* }** %9, align 8 + %10 = getelementptr inbounds { %Callable* }, { %Callable* }* %__Item1__, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__Item2__, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 1) + %14 = bitcast { %Callable* }* %__Item1__ to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 1) + %15 = bitcast { i64, %Callable* }* %__Item2__ to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + ret { { %Callable* }*, { i64, %Callable* }* }* %7 +} + +define internal { %Callable* }* @Microsoft__Quantum__Simulation__TrotterSimulationAlgorithm__body(double %trotterStepSize, i64 %trotterOrder) { +entry: + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, 
%Tuple*)*]* @Microsoft__Quantum__Simulation____QsRef3__TrotterSimulationAlgorithmImpl____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, i64 }* getelementptr ({ %Callable*, double, i64 }, { %Callable*, double, i64 }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, double, i64 }* + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store double %trotterStepSize, double* %4, align 8 + store i64 %trotterOrder, i64* %5, align 4 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__59__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__35__FunctionTable, %Tuple* %1) + %7 = call { %Callable* }* @Microsoft__Quantum__Simulation__SimulationAlgorithm__body(%Callable* %6) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + ret { %Callable* }* %7 +} + +define internal void @Lifted__PartialApplication__10__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 2 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 2 + store double %2, double* %10, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %4, { { %Callable* }*, { i64, %Callable* }* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { 
%Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 2 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 2 + store double %2, double* %10, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %4, { { %Callable* }*, { i64, %Callable* }* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 
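+; Controlled wrapper: split %arg-tuple into (control qubits, inner argument),
+; rebuild the inner (double, evolution generator, qubit array) tuple from the
+; captured values, then invoke a controlled copy of the captured callable on
+; (controls, inner tuple) and release the temporary tuples.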
+ %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %6 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 2 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 2 + store double %7, double* %12, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %9, { { %Callable* }*, { i64, %Callable* }* }** %13, align 8 + store %Array* %4, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* getelementptr ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %6 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 2 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 2 + store double %7, double* %12, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %9, { { %Callable* }*, { i64, %Callable* }* }** %13, align 8 + store %Array* %4, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* getelementptr ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %18, align 8 + %19 = getelementptr 
inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @MemoryManagement__7__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 2 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 %count-change) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 %count-change) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__7__AliasCount(%Tuple* %capture-tuple, i32 
%count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 2 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 %count-change) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 %count-change) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__11__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, i64 }* + %1 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Array* }* getelementptr ({ %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, i64, %Array* }* + %10 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %9, i32 0, i32 
1 + %12 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %9, i32 0, i32 2 + store %Callable* %2, %Callable** %10, align 8 + store i64 %4, i64* %11, align 4 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, i64 }* + %1 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Array* }* getelementptr ({ %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, i64, %Array* }* + %10 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %9, i32 0, i32 2 + store %Callable* %2, %Callable** %10, align 8 + store i64 %4, i64* %11, align 4 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, i64 }* + %6 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %5, i32 0, i32 2 + %9 = load 
i64, i64* %8, align 4 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Array* }* getelementptr ({ %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Callable*, i64, %Array* }* + %12 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %11, i32 0, i32 2 + store %Callable* %7, %Callable** %12, align 8 + store i64 %9, i64* %13, align 4 + store %Array* %4, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, i64, %Array* }* }* getelementptr ({ %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { %Callable*, i64, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { %Callable*, i64, %Array* }* %11, { %Callable*, i64, %Array* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, i64 }* + %6 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Array* }* getelementptr ({ %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Callable*, i64, %Array* }* + %12 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Callable*, i64, %Array* }, { 
%Callable*, i64, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %11, i32 0, i32 2 + store %Callable* %7, %Callable** %12, align 8 + store i64 %9, i64* %13, align 4 + store %Array* %4, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, i64, %Array* }* }* getelementptr ({ %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { %Callable*, i64, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { %Callable*, i64, %Array* }* %11, { %Callable*, i64, %Array* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___MergeTwoRegisters_____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, i64, %Array* }* + %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %1, align 8 + %5 = load i64, i64* %2, align 4 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___MergeTwoRegisters_____body(%Callable* %4, i64 %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___MergeTwoRegisters_____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, i64, %Array* }* + %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %1, align 8 + %5 = load i64, i64* %2, align 4 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___MergeTwoRegisters_____adj(%Callable* %4, i64 
%5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___MergeTwoRegisters_____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, i64, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Callable*, i64, %Array* }*, { %Callable*, i64, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___MergeTwoRegisters_____ctl(%Array* %3, { %Callable*, i64, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___MergeTwoRegisters_____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, i64, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Callable*, i64, %Array* }*, { %Callable*, i64, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef0___MergeTwoRegisters_____ctladj(%Array* %3, { %Callable*, i64, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__8__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, i64 }* + %1 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__8__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, i64 }* + %1 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable*, i64 }, { %Callable*, %Callable*, i64 }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call 
void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__12__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 2 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 2 + store double %2, double* %10, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %4, { { %Callable* }*, { i64, %Callable* }* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 2 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 2 + store double %2, double* %10, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %4, { { %Callable* }*, { i64, %Callable* }* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %6 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 2 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, 
%Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 2 + store double %7, double* %12, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %9, { { %Callable* }*, { i64, %Callable* }* }** %13, align 8 + store %Array* %4, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* getelementptr ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %6 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 2 + %9 = load { { 
%Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 2 + store double %7, double* %12, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %9, { { %Callable* }*, { i64, %Callable* }* }** %13, align 8 + store %Array* %4, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* getelementptr ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermsToGenIdx__body(%Array* %data, %Array* %termType, i64 %idx) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %data) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, 
%entry + %2 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %2) + %5 = bitcast i8* %4 to { %Array*, %Array* }** + %6 = load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 1) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %idx) + %14 = bitcast i8* %13 to { %Array*, %Array* }** + %15 = load { %Array*, %Array* }*, { %Array*, %Array* }** %14, align 8 + %16 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermToGenIdx__body({ %Array*, %Array* }* %15, %Array* %termType) + %17 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %18 = phi i64 [ 0, %exit__1 ], [ %28, %exiting__2 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %18) + %21 = bitcast i8* %20 to { %Array*, %Array* }** + %22 = load { %Array*, %Array* }*, { %Array*, %Array* }** %21, align 8 + %23 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %22, i32 0, i32 0 + %24 = load %Array*, %Array** %23, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %24, i32 -1) + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %22, i32 0, i32 1 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 -1) + %27 = bitcast { %Array*, %Array* }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %28 = add i64 %18, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %16 +} + +define internal void @Lifted__PartialApplication__13__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { i64 }* + %6 = getelementptr inbounds { i64 }, { i64 }* %5, i32 0, i32 0 + %7 = load i64, i64* %6, 
align 4 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, i64 }* getelementptr ({ %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array*, i64 }* + %10 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %9, i32 0, i32 2 + store %Array* %2, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + store i64 %7, i64* %12, align 4 + %13 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__HTermsToGenIdx__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array*, i64 }* + %1 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %1, align 8 + %5 = load %Array*, %Array** %2, align 8 + %6 = load i64, i64* %3, align 4 + %7 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermsToGenIdx__body(%Array* %4, %Array* %5, i64 %6) + %8 = bitcast %Tuple* %result-tuple to { { { %Array*, %Array* }*, %Array* }* }* + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %8, i32 0, i32 0 + store { { %Array*, %Array* }*, %Array* }* %7, { { %Array*, %Array* }*, %Array* }** %9, align 8 + ret void +} + +define internal void @MemoryManagement__9__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { %Array*, %Array* }** + %11 = load { %Array*, %Array* }*, { %Array*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 0 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 %count-change) + %14 = 
getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 %count-change) + %16 = bitcast { %Array*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %18 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %19 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__9__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { %Array*, %Array* }** + %11 = load { %Array*, %Array* }*, { %Array*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 0 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 %count-change) + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 %count-change) + %16 = bitcast { %Array*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %18 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %19 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal double @Microsoft__Quantum__Math__AbsD__body(double %a) { +entry: + %0 = fcmp olt double %a, 0.000000e+00 + br i1 %0, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %1 = fneg double %a + br label %condContinue__1 + 
+condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %2 = phi double [ %1, %condTrue__1 ], [ %a, %condFalse__1 ] + ret double %2 +} + +define internal double @Microsoft__Quantum__Math__PowD__body(double %x, double %y) { +entry: + %0 = call double @llvm.pow.f64(double %x, double %y) + ret double %0 +} + +define internal { i64, double, i1 }* @Microsoft__Quantum__Math____QsRef2__ExtendedTruncation____body(double %value) { +entry: + %truncated = fptosi double %value to i64 + %0 = sitofp i64 %truncated to double + %1 = fsub double %0, %value + %2 = fcmp oge double %value, 0.000000e+00 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, i1 }* getelementptr ({ i64, double, i1 }, { i64, double, i1 }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i64, double, i1 }* + %5 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %4, i32 0, i32 2 + store i64 %truncated, i64* %5, align 4 + store double %1, double* %6, align 8 + store i1 %2, i1* %7, align 1 + ret { i64, double, i1 }* %4 +} + +define internal double @Microsoft__Quantum__Math__AbsComplex__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %d = call double @Microsoft__Quantum__Math__AbsSquaredComplex__body({ double, double }* %input) + %1 = call double @__quantum__qis__sqrt__body(double %d) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %1 +} + +define internal double @Microsoft__Quantum__Math__AbsSquaredComplex__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 0 + %real = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 1 + %imaginary = load double, double* %2, align 8 + %3 = fmul double %real, %real + %4 = fmul double %imaginary, %imaginary + %5 = fadd double %3, %4 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %5 +} + +define internal i64 @Microsoft__Quantum__Math__AbsI__body(i64 %a) { +entry: + %0 = icmp slt i64 %a, 0 + br i1 %0, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %1 = sub i64 0, %a + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %2 = phi i64 [ %1, %condTrue__1 ], [ %a, %condFalse__1 ] + ret i64 %2 +} + +define internal double @Microsoft__Quantum__Math__ArcTan2__body(double %y, double %x) { +entry: + %0 = call double @__quantum__qis__arctan2__body(double %y, double %x) + ret double %0 +} + +declare double @__quantum__qis__arctan2__body(double, double) + +define internal double @Microsoft__Quantum__Math__ArgComplex__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 0 + %real = load double, double* %1, align 8 + %2 = getelementptr inbounds { 
double, double }, { double, double }* %input, i32 0, i32 1 + %imaginary = load double, double* %2, align 8 + %3 = call double @__quantum__qis__arctan2__body(double %imaginary, double %real) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %3 +} + +define internal double @Microsoft__Quantum__Math__Cos__body(double %theta) { +entry: + %0 = call double @__quantum__qis__cos__body(double %theta) + ret double %0 +} + +declare double @__quantum__qis__cos__body(double) + +declare double @__quantum__qis__log__body(double) + +define internal double @Microsoft__Quantum__Math__LogOf2__body() { +entry: + ret double 0x3FE62E42FEFA39EF +} + +define internal double @Microsoft__Quantum__Math__Log__body(double %input) { +entry: + %0 = call double @__quantum__qis__log__body(double %input) + ret double %0 +} + +define internal i64 @Microsoft__Quantum__Math__MaxI__body(i64 %a, i64 %b) { +entry: + %0 = icmp sgt i64 %a, %b + %1 = select i1 %0, i64 %a, i64 %b + ret i64 %1 +} + +define internal i64 @Microsoft__Quantum__Math__MinI__body(i64 %a, i64 %b) { +entry: + %0 = icmp slt i64 %a, %b + %1 = select i1 %0, i64 %a, i64 %b + ret i64 %1 +} + +define internal double @Microsoft__Quantum__Math__PI__body() { +entry: + ret double 0x400921FB54442D18 +} + +define internal double @Microsoft__Quantum__Math__PNorm__body(double %p, %Array* %array) { +entry: + %norm = alloca double, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = fcmp ole double %p, 0.000000e+00 + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([59 x i8], [59 x i8]* @8, i32 0, i32 0)) + %2 = call %String* @__quantum__rt__double_to_string(double %p) + %3 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %2) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + %4 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @9, i32 0, i32 0)) + %5 = call %String* @__quantum__rt__string_concatenate(%String* %3, %String* %4) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__fail(%String* %5) + unreachable + +continue__1: ; preds = %entry + store double 0.000000e+00, double* %norm, align 8 + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %7 = sub i64 %6, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %8 = phi i64 [ 0, %continue__1 ], [ %16, %exiting__1 ] + %9 = icmp sle i64 %8, %7 + br i1 %9, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %8) + %11 = bitcast i8* %10 to double* + %element = load double, double* %11, align 8 + %12 = load double, double* %norm, align 8 + %13 = call double @Microsoft__Quantum__Math__AbsD__body(double %element) + %14 = call double @Microsoft__Quantum__Math__PowD__body(double %13, double %p) + %15 = fadd double %12, %14 + store double %15, double* %norm, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %16 = add i64 %8, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %17 = load double, double* %norm, align 8 + %18 = 
fdiv double 1.000000e+00, %p + %19 = call double @Microsoft__Quantum__Math__PowD__body(double %17, double %18) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret double %19 +} + +declare %String* @__quantum__rt__double_to_string(double) + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare double @llvm.pow.f64(double, double) #0 + +define internal double @Microsoft__Quantum__Math__RealMod__body(double %value, double %modulo, double %minValue) { +entry: + %0 = call double @Microsoft__Quantum__Math__PI__body() + %1 = fmul double 2.000000e+00, %0 + %2 = fsub double %value, %minValue + %3 = fdiv double %2, %modulo + %4 = fsub double %3, 5.000000e-01 + %fractionalValue = fmul double %1, %4 + %cosFracValue = call double @__quantum__qis__cos__body(double %fractionalValue) + %sinFracValue = call double @__quantum__qis__sin__body(double %fractionalValue) + %5 = call double @__quantum__qis__arctan2__body(double %sinFracValue, double %cosFracValue) + %6 = call double @Microsoft__Quantum__Math__PI__body() + %7 = fmul double 2.000000e+00, %6 + %8 = fdiv double %5, %7 + %moduloValue = fadd double 5.000000e-01, %8 + %9 = fmul double %moduloValue, %modulo + %output = fadd double %9, %minValue + ret double %output +} + +define internal double @Microsoft__Quantum__Math__Sin__body(double %theta) { +entry: + %0 = call double @__quantum__qis__sin__body(double %theta) + ret double %0 +} + +define internal double @Microsoft__Quantum__Math__Sqrt__body(double %d) { +entry: + %0 = call double @__quantum__qis__sqrt__body(double %d) + ret double %0 +} + +define internal i64 @Microsoft__Quantum__Convert__BoolArrayAsInt__body(%Array* %bits) { +entry: + %number = alloca i64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %nBits = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %0 = icmp slt i64 %nBits, 64 + %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @10, i32 0, i32 0)) + %2 = call %String* @__quantum__rt__int_to_string(i64 %nBits) + %3 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %2) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + %4 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @9, i32 0, i32 0)) + %5 = call %String* @__quantum__rt__string_concatenate(%String* %3, %String* %4) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %4, i32 -1) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %0, %String* %5) + store i64 0, i64* %number, align 4 + %6 = sub i64 %nBits, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxBit = phi i64 [ 0, %entry ], [ %16, %exiting__1 ] + %7 = icmp sle i64 %idxBit, %6 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bits, i64 %idxBit) + %9 = bitcast i8* %8 to i1* + %10 = load i1, i1* %9, align 1 + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %11 = load i64, i64* %number, align 4 + %12 = trunc i64 %idxBit to i32 + %13 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %12) + %14 = fptosi double %13 to i64 + %15 = add i64 %11, %14 + store i64 %15, i64* %number, align 4 + br label 
%continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %16 = add i64 %idxBit, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %17 = load i64, i64* %number, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %5, i32 -1) + ret i64 %17 +} + +define internal void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %actual, %String* %message) { +entry: + %0 = xor i1 %actual, true + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__string_update_reference_count(%String* %message, i32 1) + call void @__quantum__rt__fail(%String* %message) + unreachable + +continue__1: ; preds = %entry + ret void +} + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare double @llvm.powi.f64.i32(double, i32) #0 + +define internal %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %number, i64 %bits) { +entry: + %tempInt = alloca i64, align 8 + %outputBits = alloca %Array*, align 8 + %0 = icmp sge i64 %bits, 0 + br i1 %0, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %entry + %1 = icmp sle i64 %bits, 63 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %entry + %2 = phi i1 [ %1, %condTrue__1 ], [ %0, %entry ] + %3 = trunc i64 %bits to i32 + %4 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %3) + %5 = fptosi double %4 to i64 + %6 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([33 x i8], [33 x i8]* @11, i32 0, i32 0)) + %7 = call %String* @__quantum__rt__int_to_string(i64 %5) + %8 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %7) + call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %8) + %9 = icmp slt i64 %bits, 63 + br i1 %9, label %condTrue__2, label %condFalse__1 + +condTrue__2: ; preds = %condContinue__1 + %10 = shl i64 1, %bits + br label %condContinue__2 + +condFalse__1: ; preds = %condContinue__1 + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__1, %condTrue__2 + %max = phi i64 [ %10, %condTrue__2 ], [ 9223372036854775807, %condFalse__1 ] + %11 = icmp sge i64 %number, 0 + br i1 %11, label %condTrue__3, label %condContinue__3 + +condTrue__3: ; preds = %condContinue__2 + %12 = icmp sle i64 %number, %max + br label %condContinue__3 + +condContinue__3: ; preds = %condTrue__3, %condContinue__2 + %13 = phi i1 [ %12, %condTrue__3 ], [ %11, %condContinue__2 ] + %14 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([34 x i8], [34 x i8]* @12, i32 0, i32 0)) + %15 = call %String* @__quantum__rt__int_to_string(i64 %bits) + %16 = call %String* @__quantum__rt__string_concatenate(%String* %14, %String* %15) + call void @__quantum__rt__string_update_reference_count(%String* %14, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + %17 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @13, i32 0, i32 0)) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = 
call %String* @__quantum__rt__int_to_string(i64 %number) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + %21 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @9, i32 0, i32 0)) + %22 = call %String* @__quantum__rt__string_concatenate(%String* %20, %String* %21) + call void @__quantum__rt__string_update_reference_count(%String* %20, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %21, i32 -1) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %13, %String* %22) + %23 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %bits) + %24 = sub i64 %bits, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %condContinue__3 + %25 = phi i64 [ 0, %condContinue__3 ], [ %29, %exiting__1 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %25) + %28 = bitcast i8* %27 to i1* + store i1 false, i1* %28, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %29 = add i64 %25, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %23, %Array** %outputBits, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + store i64 %number, i64* %tempInt, align 4 + %30 = sub i64 %bits, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idxBit = phi i64 [ 0, %exit__1 ], [ %41, %exiting__2 ] + %31 = icmp sle i64 %idxBit, %30 + br i1 %31, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %32 = load %Array*, %Array** %outputBits, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = load i64, i64* %tempInt, align 4 + %35 = srem i64 %34, 2 + %36 = icmp eq i64 %35, 0 + %37 = select i1 %36, i1 false, i1 true + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxBit) + %39 = bitcast i8* %38 to i1* + store i1 %37, i1* %39, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %outputBits, align 8 + %40 = sdiv i64 %34, 2 + store i64 %40, i64* %tempInt, align 4 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %41 = add i64 %idxBit, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %42 = load %Array*, %Array** %outputBits, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %22, i32 -1) + ret %Array* %42 +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { +entry: + %__controlQubits__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__controlQubits__, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, %Qubit** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %control, %Qubit** %5, align 8 + %__controlQubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %__controlQubits__, %Array* %3) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__1, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* + %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 + store %Qubit* %control, %Qubit** %5, align 8 + store %Qubit* %target, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Exp__body(%Array* %paulis, double %theta, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* 
%qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Exp__adj(%Array* %paulis, double %theta, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 0 + %paulis = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Array*, double, %Array* }* + %6 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 2 + store %Array* %paulis, %Array** %6, align 8 + store double %theta, double* %7, align 8 + store %Array* %qubits, %Array** %8, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 0 + %paulis = load %Array*, %Array** %1, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Array*, double, %Array* }* + %6 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 2 + store %Array* %paulis, %Array** %6, align 8 + store double %theta, double* %7, align 8 + store %Array* %qubits, %Array** %8, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__h__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__h__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__h__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__h__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) + +define internal %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void 
@__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret %Result* %0 +} + +define internal void @Microsoft__Quantum__Intrinsic__R__body(i2 %pauli, double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__r__body(i2 %pauli, double %theta, %Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__r__body(i2, double, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__R__adj(i2 %pauli, double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__r__adj(i2 %pauli, double %theta, %Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__r__adj(i2, double, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__R__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 2 + %qubit = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { i2, double, %Qubit* }* + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 2 + store i2 %pauli, i2* %6, align 1 + store double %theta, double* %7, align 8 + store %Qubit* %qubit, %Qubit** %8, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +declare void @__quantum__qis__r__ctl(%Array*, { i2, double, %Qubit* }*) + +define internal void @Microsoft__Quantum__Intrinsic__R__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 2 + %qubit = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { i2, double, %Qubit* }* + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 2 + store i2 %pauli, i2* %6, align 1 + store double %theta, double* 
%7, align 8 + store %Qubit* %qubit, %Qubit** %8, align 8 + call void @__quantum__qis__r__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +declare void @__quantum__qis__r__ctladj(%Array*, { i2, double, %Qubit* }*) + +define internal void @Microsoft__Quantum__Intrinsic__R1__body(double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__r__body(i2 -2, double %theta, %Qubit* %qubit) + %theta__2 = fneg double %theta + call void @__quantum__qis__r__body(i2 0, double %theta__2, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R1__adj(double %theta, %Qubit* %qubit) { +entry: + %theta__1 = fneg double %theta + call void @__quantum__qis__r__adj(i2 0, double %theta__1, %Qubit* %qubit) + call void @__quantum__qis__r__adj(i2 -2, double %theta, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R1__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i2, double, %Qubit* }* + %5 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 2 + store i2 -2, i2* %5, align 1 + store double %theta, double* %6, align 8 + store %Qubit* %qubit, %Qubit** %7, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %theta__2 = fneg double %theta + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { i2, double, %Qubit* }* + %10 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %9, i32 0, i32 2 + store i2 0, i2* %10, align 1 + store double %theta__2, double* %11, align 8 + store %Qubit* %qubit, %Qubit** %12, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %9) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R1__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %theta__1 = fneg double %theta + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i2, double, %Qubit* }* + %5 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 2 + store i2 0, i2* %5, align 1 + store double %theta__1, double* %6, align 8 + store %Qubit* %qubit, %Qubit** %7, align 8 + call void @__quantum__qis__r__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { i2, double, %Qubit* }* + %10 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %9, i32 0, i32 2 + store i2 -2, i2* %10, align 1 + store double %theta, double* %11, align 8 + store %Qubit* %qubit, %Qubit** %12, align 8 + call void @__quantum__qis__r__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %9) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__x__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__RFrac__body(i2 %pauli, i64 %numerator, i64 %power, %Qubit* %qubit) { +entry: + %0 = call double @Microsoft__Quantum__Math__PI__body() + %1 = fmul double -2.000000e+00, %0 + %2 = sitofp i64 %numerator to double + %3 = fmul double %1, %2 + %4 = sitofp i64 %power to double + %5 = call double @llvm.pow.f64(double 2.000000e+00, double %4) + %angle = fdiv double %3, %5 + call void @__quantum__qis__r__body(i2 %pauli, double %angle, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__RFrac__adj(i2 %pauli, i64 %numerator, i64 %power, %Qubit* %qubit) { +entry: + %0 = call double @Microsoft__Quantum__Math__PI__body() + %1 = fmul double -2.000000e+00, %0 + %2 = sitofp i64 %numerator to double + %3 = fmul double %1, %2 
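+  ; as in RFrac__body above: angle = (-2.0 * PI() * numerator) / 2.0^power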
+ %4 = sitofp i64 %power to double + %5 = call double @llvm.pow.f64(double 2.000000e+00, double %4) + %__qsVar0__angle__ = fdiv double %3, %5 + call void @__quantum__qis__r__adj(i2 %pauli, double %__qsVar0__angle__, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__RFrac__ctl(%Array* %__controlQubits__, { i2, i64, i64, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 1 + %numerator = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 2 + %power = load i64, i64* %3, align 4 + %4 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 3 + %qubit = load %Qubit*, %Qubit** %4, align 8 + %5 = call double @Microsoft__Quantum__Math__PI__body() + %6 = fmul double -2.000000e+00, %5 + %7 = sitofp i64 %numerator to double + %8 = fmul double %6, %7 + %9 = sitofp i64 %power to double + %10 = call double @llvm.pow.f64(double 2.000000e+00, double %9) + %angle = fdiv double %8, %10 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i2, double, %Qubit* }* + %13 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 2 + store i2 %pauli, i2* %13, align 1 + store double %angle, double* %14, align 8 + store %Qubit* %qubit, %Qubit** %15, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %12) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__RFrac__ctladj(%Array* %__controlQubits__, { i2, i64, i64, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 1 + %numerator = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 2 + %power = load i64, i64* %3, align 4 + %4 = getelementptr inbounds { i2, i64, i64, %Qubit* }, { i2, i64, i64, %Qubit* }* %0, i32 0, i32 3 + %qubit = load %Qubit*, %Qubit** %4, align 8 + %5 = call double @Microsoft__Quantum__Math__PI__body() + %6 = fmul double -2.000000e+00, %5 + %7 = sitofp i64 %numerator to double + %8 = fmul double %6, %7 + %9 = sitofp i64 %power to double + %10 = call double @llvm.pow.f64(double 2.000000e+00, double %9) + %__qsVar0__angle__ = fdiv double %8, %10 + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i2, double, %Qubit* }* + %13 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %12, i32 0, i32 2 + store i2 %pauli, i2* %13, align 1 + store double %__qsVar0__angle__, double* %14, align 8 + store %Qubit* %qubit, %Qubit** %15, align 8 + call void @__quantum__qis__r__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %12) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__r__body(i2 -2, double %theta, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rz__adj(double %theta, %Qubit* %qubit) { +entry: + %theta__1 = fneg double %theta + call void @__quantum__qis__r__body(i2 -2, double %theta__1, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i2, double, %Qubit* }* + %5 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 2 + store i2 -2, i2* %5, align 1 + store double %theta, double* %6, align 8 + store %Qubit* %qubit, %Qubit** %7, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + 
%qubit = load %Qubit*, %Qubit** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %theta__1 = fneg double %theta + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i2, double, %Qubit* }* + %5 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 2 + store i2 -2, i2* %5, align 1 + store double %theta__1, double* %6, align 8 + store %Qubit* %qubit, %Qubit** %7, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__s__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__s__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__s__adj(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__s__adj(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__s__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__s__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__s__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__s__ctladj(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__t__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__t__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__t__adj(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__t__adj(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__T__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__t__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__t__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__T__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call 
void @__quantum__qis__t__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__t__ctladj(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__y__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__y__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__y__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__y__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__z__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__z__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal i64 @Microsoft__Quantum__Canon____QsRef3__Angle____body(i64 %index) { +entry: + %0 = call i64 @Microsoft__Quantum__Canon____QsRef3__HammingWeightI____body(i64 %index) + %1 = srem i64 %0, 2 + %2 = icmp eq i64 %1, 1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %3 = phi i64 [ 1, %condTrue__1 ], [ -1, %condFalse__1 ] + ret i64 %3 +} + +define internal i64 @Microsoft__Quantum__Canon____QsRef3__HammingWeightI____body(i64 %number) { +entry: + %cnt = alloca i64, align 8 + store i64 %number, i64* %cnt, align 4 + %0 = and i64 %number, 6148914691236517205 + %1 = ashr i64 %number, 1 + %2 = and i64 %1, 6148914691236517205 + %3 = add i64 %0, %2 + store i64 %3, i64* %cnt, align 4 + %4 = and i64 %3, 
3689348814741910323 + %5 = ashr i64 %3, 2 + %6 = and i64 %5, 3689348814741910323 + %7 = add i64 %4, %6 + store i64 %7, i64* %cnt, align 4 + %8 = and i64 %7, 1085102592571150095 + %9 = ashr i64 %7, 4 + %10 = and i64 %9, 1085102592571150095 + %11 = add i64 %8, %10 + store i64 %11, i64* %cnt, align 4 + %12 = and i64 %11, 71777214294589695 + %13 = ashr i64 %11, 8 + %14 = and i64 %13, 71777214294589695 + %15 = add i64 %12, %14 + store i64 %15, i64* %cnt, align 4 + %16 = and i64 %15, 281470681808895 + %17 = ashr i64 %15, 16 + %18 = and i64 %17, 281470681808895 + %19 = add i64 %16, %18 + store i64 %19, i64* %cnt, align 4 + %20 = and i64 %19, 4294967295 + %21 = ashr i64 %19, 32 + %22 = and i64 %21, 4294967295 + %23 = add i64 %20, %22 + store i64 %23, i64* %cnt, align 4 + ret i64 %23 +} + +define internal i1 @Microsoft__Quantum__Canon____QsRef3__AnyOutsideToleranceCP____body(double %tolerance, %Array* %coefficients) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %10) + %13 = bitcast i8* %12 to { double, double }** + %coefficient = load { double, double }*, { double, double }** %13, align 8 + %14 = bitcast { double, double }* %coefficient to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %coefficient) + %16 = fcmp ogt double %15, %tolerance + br i1 %16, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__2 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + %17 = sub i64 %0, 1 + br label %header__3 + +continue__1: ; preds = %body__2 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %continue__1 + %18 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %19 = sub i64 %0, 1 + br label %header__4 + +header__3: ; preds = %exiting__3, %then0__1 + %20 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__3 ] + %21 = icmp sle i64 %20, %17 + br i1 %21, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %20) + %23 = bitcast i8* %22 to { double, double }** + %24 = load { double, double }*, { double, double }** %23, align 8 + %25 = bitcast { double, double }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + 
br label %exiting__3 + +exiting__3: ; preds = %body__3 + %26 = add i64 %20, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 true + +header__4: ; preds = %exiting__4, %exit__2 + %27 = phi i64 [ 0, %exit__2 ], [ %33, %exiting__4 ] + %28 = icmp sle i64 %27, %19 + br i1 %28, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %27) + %30 = bitcast i8* %29 to { double, double }** + %31 = load { double, double }*, { double, double }** %30, align 8 + %32 = bitcast { double, double }* %31 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %32, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %33 = add i64 %27, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 false +} + +define internal i1 @Microsoft__Quantum__Canon____QsRef3__AnyOutsideToleranceD____body(double %tolerance, %Array* %coefficients) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to double* + %coefficient = load double, double* %5, align 8 + %6 = call double @Microsoft__Quantum__Math__AbsD__body(double %coefficient) + %7 = fcmp oge double %6, %tolerance + br i1 %7, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 true + +continue__1: ; preds = %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 false +} + +define internal void @Microsoft__Quantum__Canon____QsRef3__ApplyAndChain____body(%Array* %auxRegister, %Array* %ctrlRegister, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %auxRegister, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctrlRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctrlRegister) + %1 = icmp eq i64 %0, 0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctrlRegister) + %3 = icmp eq i64 %2, 1 + br i1 %3, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %4 = call %Qubit* @Microsoft__Quantum__Arrays___af5d1f5b3fc545fd94571101b9dee3d5_Head__body(%Array* %ctrlRegister) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %4, %Qubit* %target) + br label %continue__1 + +else__1: ; preds = %test1__1 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %auxRegister) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctrlRegister) + %7 = sub i64 %6, 2 + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr 
inbounds ([38 x i8], [38 x i8]* @14, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactI__body(i64 %5, i64 %7, %String* %8) + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctrlRegister, i64 0) + %10 = bitcast i8* %9 to %Qubit** + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 0) + %14 = bitcast i8* %13 to %Qubit** + store %Qubit* %11, %Qubit** %14, align 8 + %controls1 = call %Array* @__quantum__rt__array_concatenate(%Array* %12, %Array* %auxRegister) + call void @__quantum__rt__array_update_reference_count(%Array* %controls1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controls1, i32 1) + %controls2 = call %Array* @Microsoft__Quantum__Arrays___8eccbbf2f2c44c66bcf118fa86e46f90_Rest__body(%Array* %ctrlRegister) + call void @__quantum__rt__array_update_alias_count(%Array* %controls2, i32 1) + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 0) + %17 = bitcast i8* %16 to %Qubit** + store %Qubit* %target, %Qubit** %17, align 8 + %targets = call %Array* @__quantum__rt__array_concatenate(%Array* %auxRegister, %Array* %15) + call void @__quantum__rt__array_update_reference_count(%Array* %targets, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targets, i32 1) + %18 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyAnd__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %19 = call %Array* @Microsoft__Quantum__Arrays___149f6d26e7564d73ab41ea09c9b1a83d_Zipped3__body(%Array* %controls1, %Array* %controls2, %Array* %targets) + call void @Microsoft__Quantum__Canon___9454df5e02c74932b4fb0c010e9b8d13_ApplyToEachA__body(%Callable* %18, %Array* %19) + call void @__quantum__rt__array_update_alias_count(%Array* %controls1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controls2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targets, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controls1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controls1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controls2, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targets, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targets, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + %20 = call i64 @__quantum__rt__array_get_size_1d(%Array* %19) + %21 = sub i64 %20, 1 + br label %header__1 + +continue__1: ; preds = %exit__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %auxRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctrlRegister, i32 -1) + ret void + +header__1: ; preds = %exiting__1, %else__1 + %22 = phi i64 [ 0, %else__1 ], [ %28, %exiting__1 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__1, label %exit__1 + 
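+; body__1..exit__1 release the per-element tuples of the zipped array %19 before rejoining continue__1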
+body__1: ; preds = %header__1 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 %22) + %25 = bitcast i8* %24 to { %Qubit*, %Qubit*, %Qubit* }** + %26 = load { %Qubit*, %Qubit*, %Qubit* }*, { %Qubit*, %Qubit*, %Qubit* }** %25, align 8 + %27 = bitcast { %Qubit*, %Qubit*, %Qubit* }* %26 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %28 = add i64 %22, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 -1) + br label %continue__1 +} + +define internal %Qubit* @Microsoft__Quantum__Arrays___af5d1f5b3fc545fd94571101b9dee3d5_Head__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = icmp sgt i64 %0, 0 + %2 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([39 x i8], [39 x i8]* @33, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %1, i1 true, %String* %2) + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %4 = bitcast i8* %3 to %Qubit** + %5 = load %Qubit*, %Qubit** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + ret %Qubit* %5 +} + +define internal void @Microsoft__Quantum__Diagnostics__EqualityFactI__body(i64 %actual, i64 %expected, %String* %message) { +entry: + %0 = icmp ne i64 %actual, %expected + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Diagnostics___1cff4dbf452349c0aab5551517df2535___QsRef3__FormattedFailure____body(i64 %actual, i64 %expected, %String* %message) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___8eccbbf2f2c44c66bcf118fa86e46f90_Rest__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + %2 = insertvalue %Range { i64 1, i64 1, i64 0 }, i64 %1, 2 + %3 = call %Array* @__quantum__rt__array_slice_1d(%Array* %array, %Range %2, i1 true) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + ret %Array* %3 +} + +define internal void @Microsoft__Quantum__Canon___9454df5e02c74932b4fb0c010e9b8d13_ApplyToEachA__body(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %register) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %2) + %5 = bitcast i8* %4 to { %Qubit*, %Qubit*, %Qubit* }** + %6 = load { %Qubit*, %Qubit*, %Qubit* }*, { %Qubit*, %Qubit*, %Qubit* 
}** %5, align 8 + %7 = bitcast { %Qubit*, %Qubit*, %Qubit* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %9 = call %Range @Microsoft__Quantum__Arrays___be8c93aed1174ddf9a1dc1ba0169742c_IndexRange__body(%Array* %register) + %10 = extractvalue %Range %9, 0 + %11 = extractvalue %Range %9, 1 + %12 = extractvalue %Range %9, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %13 = icmp sgt i64 %11, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idxQubit = phi i64 [ %10, %preheader__1 ], [ %21, %exiting__2 ] + %14 = icmp sle i64 %idxQubit, %12 + %15 = icmp sge i64 %idxQubit, %12 + %16 = select i1 %13, i1 %14, i1 %15 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %18 = bitcast i8* %17 to { %Qubit*, %Qubit*, %Qubit* }** + %19 = load { %Qubit*, %Qubit*, %Qubit* }*, { %Qubit*, %Qubit*, %Qubit* }** %18, align 8 + %20 = bitcast { %Qubit*, %Qubit*, %Qubit* }* %19 to %Tuple* + call void @__quantum__rt__callable_invoke(%Callable* %singleElementOperation, %Tuple* %20, %Tuple* null) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %idxQubit, %11 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + %22 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %23 = phi i64 [ 0, %exit__2 ], [ %29, %exiting__3 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %23) + %26 = bitcast i8* %25 to { %Qubit*, %Qubit*, %Qubit* }** + %27 = load { %Qubit*, %Qubit*, %Qubit* }*, { %Qubit*, %Qubit*, %Qubit* }** %26, align 8 + %28 = bitcast { %Qubit*, %Qubit*, %Qubit* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %29 = add i64 %23, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyAnd__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, %Qubit*, %Qubit* }* + %1 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 2 + %4 = load %Qubit*, %Qubit** %1, align 8 + %5 = load %Qubit*, %Qubit** %2, align 8 + %6 = load %Qubit*, %Qubit** %3, align 8 + call void @Microsoft__Quantum__Canon__ApplyAnd__body(%Qubit* %4, %Qubit* %5, %Qubit* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyAnd__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* 
%arg-tuple to { %Qubit*, %Qubit*, %Qubit* }* + %1 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 2 + %4 = load %Qubit*, %Qubit** %1, align 8 + %5 = load %Qubit*, %Qubit** %2, align 8 + %6 = load %Qubit*, %Qubit** %3, align 8 + call void @Microsoft__Quantum__Canon__ApplyAnd__adj(%Qubit* %4, %Qubit* %5, %Qubit* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyAnd__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Qubit*, %Qubit*, %Qubit* }*, { %Qubit*, %Qubit*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApplyAnd__ctl(%Array* %3, { %Qubit*, %Qubit*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyAnd__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Qubit*, %Qubit*, %Qubit* }*, { %Qubit*, %Qubit*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApplyAnd__ctladj(%Array* %3, { %Qubit*, %Qubit*, %Qubit* }* %4) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___149f6d26e7564d73ab41ea09c9b1a83d_Zipped3__body(%Array* %first, %Array* %second, %Array* %third) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %first, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %second, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %third, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %first) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %second) + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %third) + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 3) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to i64* + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 1) + %7 = bitcast i8* %6 to i64* + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 2) + %9 = bitcast i8* %8 to i64* + store i64 %0, i64* %5, align 4 + store i64 %1, i64* %7, align 4 + store i64 %2, i64* %9, align 4 + %nElements = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %3) + %10 = icmp eq i64 %nElements, 0 + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %first, i32 -1) 
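+  ; empty zip (nElements == 0): drop the input aliases and return the empty array %11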
+ call void @__quantum__rt__array_update_alias_count(%Array* %second, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %third, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + ret %Array* %11 + +continue__1: ; preds = %entry + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %first, i64 0) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %second, i64 0) + %16 = bitcast i8* %15 to %Qubit** + %17 = load %Qubit*, %Qubit** %16, align 8 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %third, i64 0) + %19 = bitcast i8* %18 to %Qubit** + %20 = load %Qubit*, %Qubit** %19, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Qubit*, %Qubit*, %Qubit* }* + %23 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %22, i32 0, i32 1 + %25 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %22, i32 0, i32 2 + store %Qubit* %14, %Qubit** %23, align 8 + store %Qubit* %17, %Qubit** %24, align 8 + store %Qubit* %20, %Qubit** %25, align 8 + %26 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %27 = sub i64 %nElements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %28 = phi i64 [ 0, %continue__1 ], [ %32, %exiting__1 ] + %29 = icmp sle i64 %28, %27 + br i1 %29, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %26, i64 %28) + %31 = bitcast i8* %30 to { %Qubit*, %Qubit*, %Qubit* }** + store { %Qubit*, %Qubit*, %Qubit* }* %22, { %Qubit*, %Qubit*, %Qubit* }** %31, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %32 = add i64 %28, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %26, %Array** %output, align 8 + %33 = sub i64 %nElements, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %34 = phi i64 [ 0, %exit__1 ], [ %40, %exiting__2 ] + %35 = icmp sle i64 %34, %33 + br i1 %35, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %26, i64 %34) + %37 = bitcast i8* %36 to { %Qubit*, %Qubit*, %Qubit* }** + %38 = load { %Qubit*, %Qubit*, %Qubit* }*, { %Qubit*, %Qubit*, %Qubit* }** %37, align 8 + %39 = bitcast { %Qubit*, %Qubit*, %Qubit* }* %38 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %39, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %40 = add i64 %34, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %41 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %63, %exiting__3 ] + %42 = icmp sle i64 %idxElement, %41 + br i1 %42, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %43 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 -1) + %44 = 
call %Array* @__quantum__rt__array_copy(%Array* %43, i1 false) + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %first, i64 %idxElement) + %46 = bitcast i8* %45 to %Qubit** + %47 = load %Qubit*, %Qubit** %46, align 8 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %second, i64 %idxElement) + %49 = bitcast i8* %48 to %Qubit** + %50 = load %Qubit*, %Qubit** %49, align 8 + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %third, i64 %idxElement) + %52 = bitcast i8* %51 to %Qubit** + %53 = load %Qubit*, %Qubit** %52, align 8 + %54 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* null, i32 1) to i64)) + %55 = bitcast %Tuple* %54 to { %Qubit*, %Qubit*, %Qubit* }* + %56 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %55, i32 0, i32 0 + %57 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %55, i32 0, i32 1 + %58 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %55, i32 0, i32 2 + store %Qubit* %47, %Qubit** %56, align 8 + store %Qubit* %50, %Qubit** %57, align 8 + store %Qubit* %53, %Qubit** %58, align 8 + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 %idxElement) + %60 = bitcast i8* %59 to { %Qubit*, %Qubit*, %Qubit* }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %54, i32 1) + %61 = load { %Qubit*, %Qubit*, %Qubit* }*, { %Qubit*, %Qubit*, %Qubit* }** %60, align 8 + %62 = bitcast { %Qubit*, %Qubit*, %Qubit* }* %61 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %62, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %62, i32 -1) + store { %Qubit*, %Qubit*, %Qubit* }* %55, { %Qubit*, %Qubit*, %Qubit* }** %60, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 1) + store %Array* %44, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %43, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %63 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %64 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %first, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %second, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %third, i32 -1) + %65 = call i64 @__quantum__rt__array_get_size_1d(%Array* %64) + %66 = sub i64 %65, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %67 = phi i64 [ 0, %exit__3 ], [ %73, %exiting__4 ] + %68 = icmp sle i64 %67, %66 + br i1 %68, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %67) + %70 = bitcast i8* %69 to { %Qubit*, %Qubit*, %Qubit* }** + %71 = load { %Qubit*, %Qubit*, %Qubit* }*, { %Qubit*, %Qubit*, %Qubit* }** %70, align 8 + %72 = bitcast { %Qubit*, %Qubit*, %Qubit* }* %71 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %72, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %73 = add i64 %67, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + ret %Array* %64 +} + +define internal void @Microsoft__Quantum__Canon__ApplyAnd__body(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to %Qubit** + store %Qubit* %target, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Diagnostics__AssertAllZero__body(%Array* %0) + call void @__quantum__qis__h__body(%Qubit* %target) + call void @__quantum__qis__t__body(%Qubit* %target) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control1, %Qubit* %target) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control2, %Qubit* %target) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %target, %Qubit* %control1) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %target, %Qubit* %control2) + call void @__quantum__qis__t__adj(%Qubit* %control1) + call void @__quantum__qis__t__adj(%Qubit* %control2) + call void @__quantum__qis__t__body(%Qubit* %target) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %target, %Qubit* %control2) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %target, %Qubit* %control1) + call void @Microsoft__Quantum__Canon__HY__body(%Qubit* %target) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyAnd__adj(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) { +entry: + call void @__quantum__qis__h__body(%Qubit* %target) + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 -2, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %3 = bitcast i8* %2 to %Qubit** + store %Qubit* %target, %Qubit** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %result = call %Result* @__quantum__rt__result_get_one() + %msg = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([43 x i8], [43 x i8]* @15, i32 0, i32 0)) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double 5.000000e-01, %String* %msg, double 1.000000e-10) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + %4 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) + %5 = call i1 @Microsoft__Quantum__Canon__IsResultOne__body(%Result* %4) + call void @__quantum__rt__result_update_reference_count(%Result* %4, i32 -1) + br i1 %5, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Canon__CZ__body(%Qubit* %control1, %Qubit* %control2) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +define internal void 
@Microsoft__Quantum__Canon__ApplyAnd__ctl(%Array* %controls, { %Qubit*, %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control1 = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %control2 = load %Qubit*, %Qubit** %2, align 8 + %3 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 0) + %6 = bitcast i8* %5 to %Qubit** + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 1) + %8 = bitcast i8* %7 to %Qubit** + store %Qubit* %control1, %Qubit** %6, align 8 + store %Qubit* %control2, %Qubit** %8, align 8 + %9 = call %Array* @__quantum__rt__array_concatenate(%Array* %controls, %Array* %4) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 1) + call void @Microsoft__Quantum__Canon____QsRef3__ApplyMultiplyControlledAnd____body(%Array* %9, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyAnd__ctladj(%Array* %controls, { %Qubit*, %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control1 = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %control2 = load %Qubit*, %Qubit** %2, align 8 + %3 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 0) + %6 = bitcast i8* %5 to %Qubit** + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 1) + %8 = bitcast i8* %7 to %Qubit** + store %Qubit* %control1, %Qubit** %6, align 8 + store %Qubit* %control2, %Qubit** %8, align 8 + %9 = call %Array* @__quantum__rt__array_concatenate(%Array* %controls, %Array* %4) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 1) + call void @Microsoft__Quantum__Canon____QsRef3__ApplyMultiplyControlledAnd____adj(%Array* %9, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon____QsRef3__ApplyAndChain____adj(%Array* %auxRegister, %Array* %ctrlRegister, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %auxRegister, i32 1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %ctrlRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctrlRegister) + %1 = icmp eq i64 %0, 0 + br i1 %1, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +test1__1: ; preds = %entry + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctrlRegister) + %3 = icmp eq i64 %2, 1 + br i1 %3, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %4 = call %Qubit* @Microsoft__Quantum__Arrays___af5d1f5b3fc545fd94571101b9dee3d5_Head__body(%Array* %ctrlRegister) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %4, %Qubit* %target) + br label %continue__1 + +else__1: ; preds = %test1__1 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %auxRegister) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ctrlRegister) + %7 = sub i64 %6, 2 + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @14, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactI__body(i64 %5, i64 %7, %String* %8) + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ctrlRegister, i64 0) + %10 = bitcast i8* %9 to %Qubit** + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 0) + %14 = bitcast i8* %13 to %Qubit** + store %Qubit* %11, %Qubit** %14, align 8 + %__qsVar0__controls1__ = call %Array* @__quantum__rt__array_concatenate(%Array* %12, %Array* %auxRegister) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controls1__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controls1__, i32 1) + %__qsVar1__controls2__ = call %Array* @Microsoft__Quantum__Arrays___8eccbbf2f2c44c66bcf118fa86e46f90_Rest__body(%Array* %ctrlRegister) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__controls2__, i32 1) + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 0) + %17 = bitcast i8* %16 to %Qubit** + store %Qubit* %target, %Qubit** %17, align 8 + %__qsVar2__targets__ = call %Array* @__quantum__rt__array_concatenate(%Array* %auxRegister, %Array* %15) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__targets__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__targets__, i32 1) + %18 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyAnd__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %19 = call %Array* @Microsoft__Quantum__Arrays___149f6d26e7564d73ab41ea09c9b1a83d_Zipped3__body(%Array* %__qsVar0__controls1__, %Array* %__qsVar1__controls2__, %Array* %__qsVar2__targets__) + call void @Microsoft__Quantum__Canon___9454df5e02c74932b4fb0c010e9b8d13_ApplyToEachA__adj(%Callable* %18, %Array* %19) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controls1__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__controls2__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__targets__, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + call 
void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controls1__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controls1__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__controls2__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__targets__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__targets__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + %20 = call i64 @__quantum__rt__array_get_size_1d(%Array* %19) + %21 = sub i64 %20, 1 + br label %header__1 + +continue__1: ; preds = %exit__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %auxRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %ctrlRegister, i32 -1) + ret void + +header__1: ; preds = %exiting__1, %else__1 + %22 = phi i64 [ 0, %else__1 ], [ %28, %exiting__1 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 %22) + %25 = bitcast i8* %24 to { %Qubit*, %Qubit*, %Qubit* }** + %26 = load { %Qubit*, %Qubit*, %Qubit* }*, { %Qubit*, %Qubit*, %Qubit* }** %25, align 8 + %27 = bitcast { %Qubit*, %Qubit*, %Qubit* }* %26 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %28 = add i64 %22, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 -1) + br label %continue__1 +} + +define internal void @Microsoft__Quantum__Canon___9454df5e02c74932b4fb0c010e9b8d13_ApplyToEachA__adj(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %register) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %2) + %5 = bitcast i8* %4 to { %Qubit*, %Qubit*, %Qubit* }** + %6 = load { %Qubit*, %Qubit*, %Qubit* }*, { %Qubit*, %Qubit*, %Qubit* }** %5, align 8 + %7 = bitcast { %Qubit*, %Qubit*, %Qubit* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %9 = call %Range @Microsoft__Quantum__Arrays___be8c93aed1174ddf9a1dc1ba0169742c_IndexRange__body(%Array* %register) + %10 = extractvalue %Range %9, 0 + %11 = extractvalue %Range %9, 1 + %12 = extractvalue %Range %9, 2 + %13 = sub i64 %12, %10 + %14 = sdiv i64 %13, %11 + %15 = mul i64 %11, %14 + %16 = add i64 %10, %15 + %17 = sub i64 0, %11 + %18 = insertvalue %Range zeroinitializer, i64 %16, 0 + %19 = insertvalue %Range 
%18, i64 %17, 1 + %20 = insertvalue %Range %19, i64 %10, 2 + %21 = extractvalue %Range %20, 0 + %22 = extractvalue %Range %20, 1 + %23 = extractvalue %Range %20, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %24 = icmp sgt i64 %22, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %21, %preheader__1 ], [ %33, %exiting__2 ] + %25 = icmp sle i64 %__qsVar0__idxQubit__, %23 + %26 = icmp sge i64 %__qsVar0__idxQubit__, %23 + %27 = select i1 %24, i1 %25, i1 %26 + br i1 %27, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %28 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %28) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %30 = bitcast i8* %29 to { %Qubit*, %Qubit*, %Qubit* }** + %31 = load { %Qubit*, %Qubit*, %Qubit* }*, { %Qubit*, %Qubit*, %Qubit* }** %30, align 8 + %32 = bitcast { %Qubit*, %Qubit*, %Qubit* }* %31 to %Tuple* + call void @__quantum__rt__callable_invoke(%Callable* %28, %Tuple* %32, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %28, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %33 = add i64 %__qsVar0__idxQubit__, %22 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + %34 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %41, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %35) + %38 = bitcast i8* %37 to { %Qubit*, %Qubit*, %Qubit* }** + %39 = load { %Qubit*, %Qubit*, %Qubit* }*, { %Qubit*, %Qubit*, %Qubit* }** %38, align 8 + %40 = bitcast { %Qubit*, %Qubit*, %Qubit* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %41 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon____QsRef3__ApplyMultiplyControlledAnd____body(%Array* %controls, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %vars = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to %Qubit** + store %Qubit* %target, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Diagnostics__AssertAllZero__body(%Array* %0) + call void @__quantum__qis__h__body(%Qubit* %target) + %code = call %Array* @Microsoft__Quantum__Canon____QsRef3__GrayCode____body(i64 %vars) + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %code) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry 
], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %code, i64 %5) + %8 = bitcast i8* %7 to { i64, i64 }** + %9 = load { i64, i64 }*, { i64, i64 }** %8, align 8 + %10 = bitcast { i64, i64 }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %code, i32 1) + %12 = sub i64 %3, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %j = phi i64 [ 0, %exit__1 ], [ %24, %exiting__2 ] + %13 = icmp sle i64 %j, %12 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %code, i64 %j) + %15 = bitcast i8* %14 to { i64, i64 }** + %16 = load { i64, i64 }*, { i64, i64 }** %15, align 8 + %17 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %16, i32 0, i32 0 + %offset = load i64, i64* %17, align 4 + %18 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %16, i32 0, i32 1 + %ctrl = load i64, i64* %18, align 4 + %19 = call i64 @Microsoft__Quantum__Canon____QsRef3__Angle____body(i64 %offset) + %20 = add i64 %vars, 1 + call void @Microsoft__Quantum__Intrinsic__RFrac__body(i2 -2, i64 %19, i64 %20, %Qubit* %target) + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %ctrl) + %22 = bitcast i8* %21 to %Qubit** + %23 = load %Qubit*, %Qubit** %22, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %23, %Qubit* %target) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %24 = add i64 %j, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @Microsoft__Quantum__Canon__HY__body(%Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + %25 = sub i64 %3, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %26 = phi i64 [ 0, %exit__2 ], [ %32, %exiting__3 ] + %27 = icmp sle i64 %26, %25 + br i1 %27, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %code, i64 %26) + %29 = bitcast i8* %28 to { i64, i64 }** + %30 = load { i64, i64 }*, { i64, i64 }** %29, align 8 + %31 = bitcast { i64, i64 }* %30 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %31, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %32 = add i64 %26, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %code, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + %33 = sub i64 %3, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %34 = phi i64 [ 0, %exit__3 ], [ %40, %exiting__4 ] + %35 = icmp sle i64 %34, %33 + br i1 %35, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %code, i64 %34) + %37 = bitcast i8* %36 to { i64, i64 }** + %38 = load { i64, i64 }*, { i64, i64 }** %37, align 8 + %39 = bitcast { i64, i64 }* %38 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %39, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %40 = add i64 %34, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + 
call void @__quantum__rt__array_update_reference_count(%Array* %code, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertAllZero__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %2) + %5 = bitcast i8* %4 to %Qubit** + %qubit = load %Qubit*, %Qubit** %5, align 8 + %6 = call %Result* @__quantum__rt__result_get_zero() + call void @Microsoft__Quantum__Diagnostics__AssertQubit__body(%Result* %6, %Qubit* %qubit) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Canon____QsRef3__GrayCode____body(i64 %n) { +entry: + %current = alloca %Array*, align 8 + %j = alloca i64, align 8 + %res = alloca %Array*, align 8 + %N = shl i64 1, %n + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64 }* getelementptr ({ i64, i64 }, { i64, i64 }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { i64, i64 }* + %2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %1, i32 0, i32 1 + store i64 0, i64* %2, align 4 + store i64 0, i64* %3, align 4 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %N) + %5 = sub i64 %N, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %6 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %7 = icmp sle i64 %6, %5 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %6) + %9 = bitcast i8* %8 to { i64, i64 }** + store { i64, i64 }* %1, { i64, i64 }** %9, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %6, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %4, %Array** %res, align 8 + %11 = sub i64 %N, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %12) + %15 = bitcast i8* %14 to { i64, i64 }** + %16 = load { i64, i64 }*, { i64, i64 }** %15, align 8 + %17 = bitcast { i64, i64 }* %16 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %18 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + store i64 0, i64* %j, align 4 + %19 = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 0, i64 %n) + store %Array* %19, %Array** %current, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 1) + %20 = sub i64 %N, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %i = 
phi i64 [ 0, %exit__2 ], [ %59, %exiting__3 ] + %21 = icmp sle i64 %i, %20 + br i1 %21, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %22 = srem i64 %i, 2 + %23 = icmp eq i64 %22, 0 + br i1 %23, label %then0__1, label %else__1 + +then0__1: ; preds = %body__3 + store i64 0, i64* %j, align 4 + br label %continue__1 + +else__1: ; preds = %body__3 + %24 = load %Array*, %Array** %current, align 8 + %25 = sub i64 %N, 1 + %26 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %25, 2 + %27 = call %Array* @Microsoft__Quantum__Convert__RangeAsIntArray__body(%Range %26) + %e = call %Array* @Microsoft__Quantum__Arrays___d2cffdff45fe42ce8be877d5eee9264a_Zipped__body(%Array* %24, %Array* %27) + %28 = call i64 @__quantum__rt__array_get_size_1d(%Array* %e) + %29 = sub i64 %28, 1 + br label %header__4 + +continue__1: ; preds = %exit__7, %then0__1 + %30 = load i64, i64* %j, align 4 + %31 = sub i64 %n, 1 + %32 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 0) + %34 = bitcast i8* %33 to i64* + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 1) + %36 = bitcast i8* %35 to i64* + store i64 %30, i64* %34, align 4 + store i64 %31, i64* %36, align 4 + %37 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %32) + %38 = call i64 @Microsoft__Quantum__Math__MaxI__body(i64 0, i64 %37) + store i64 %38, i64* %j, align 4 + %39 = load %Array*, %Array** %res, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %39, i32 -1) + %40 = call %Array* @__quantum__rt__array_copy(%Array* %39, i1 false) + %41 = load %Array*, %Array** %current, align 8 + %42 = call i64 @Microsoft__Quantum__Convert__BoolArrayAsInt__body(%Array* %41) + %43 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64 }* getelementptr ({ i64, i64 }, { i64, i64 }* null, i32 1) to i64)) + %44 = bitcast %Tuple* %43 to { i64, i64 }* + %45 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %44, i32 0, i32 0 + %46 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %44, i32 0, i32 1 + store i64 %42, i64* %45, align 4 + store i64 %38, i64* %46, align 4 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %40, i64 %i) + %48 = bitcast i8* %47 to { i64, i64 }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 1) + %49 = load { i64, i64 }*, { i64, i64 }** %48, align 8 + %50 = bitcast { i64, i64 }* %49 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %50, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %50, i32 -1) + store { i64, i64 }* %44, { i64, i64 }** %48, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %40, i32 1) + store %Array* %40, %Array** %res, align 8 + %51 = icmp slt i64 %38, %n + br i1 %51, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 -1) + %52 = call %Array* @__quantum__rt__array_copy(%Array* %41, i1 false) + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %41, i64 %38) + %54 = bitcast i8* %53 to i1* + %55 = load i1, i1* %54, align 1 + %56 = xor i1 %55, true + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 %38) + %58 = bitcast i8* %57 to i1* + store i1 %56, i1* %58, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %52, i32 1) + store %Array* %52, %Array** %current, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %41, i32 -1) + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %39, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %continue__2 + %59 = add i64 %i, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %60 = load %Array*, %Array** %res, align 8 + %61 = load %Array*, %Array** %current, align 8 + %62 = call i64 @__quantum__rt__array_get_size_1d(%Array* %60) + %63 = sub i64 %62, 1 + br label %header__8 + +header__4: ; preds = %exiting__4, %else__1 + %64 = phi i64 [ 0, %else__1 ], [ %70, %exiting__4 ] + %65 = icmp sle i64 %64, %29 + br i1 %65, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %e, i64 %64) + %67 = bitcast i8* %66 to { i1, i64 }** + %68 = load { i1, i64 }*, { i1, i64 }** %67, align 8 + %69 = bitcast { i1, i64 }* %68 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %69, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %70 = add i64 %64, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %e, i32 1) + %71 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___b950c6e85b1944ae91a2dee4f20f4c18_Fst__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %72 = call %Array* @Microsoft__Quantum__Arrays___4c1a69a66933447c844ced0239fa6495_Filtered__body(%Callable* %71, %Array* %e) + %73 = call { i1, i64 }* @Microsoft__Quantum__Arrays___819954bcd82146a6a8342446e1190957_Head__body(%Array* %72) + %74 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %73, i32 0, i32 0 + %75 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %73, i32 0, i32 1 + %76 = load i1, i1* %74, align 1 + %77 = load i64, i64* %75, align 4 + %78 = call i64 @Microsoft__Quantum__Canon___8222e5371d1140e18441af955cbd378d_Snd__body(i1 %76, i64 %77) + %79 = add i64 %78, 1 + store i64 %79, i64* %j, align 4 + %80 = sub i64 %28, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %81 = phi i64 [ 0, %exit__4 ], [ %87, %exiting__5 ] + %82 = icmp sle i64 %81, %80 + br i1 %82, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %e, i64 %81) + %84 = bitcast i8* %83 to { i1, i64 }** + %85 = load { i1, i64 }*, { i1, i64 }** %84, align 8 + %86 = bitcast { i1, i64 }* %85 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %87 = add i64 %81, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %e, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + %88 = sub i64 %28, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %89 = phi i64 [ 0, %exit__5 ], [ %95, %exiting__6 ] + %90 = icmp sle i64 %89, %88 + br i1 %90, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %e, i64 %89) + %92 = bitcast i8* %91 to { i1, i64 }** + %93 = load { i1, i64 }*, { i1, i64 }** %92, align 8 + %94 = bitcast { i1, i64 }* %93 to %Tuple* + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %94, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %95 = add i64 %89, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %e, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %71, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %71, i32 -1) + %96 = call i64 @__quantum__rt__array_get_size_1d(%Array* %72) + %97 = sub i64 %96, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %98 = phi i64 [ 0, %exit__6 ], [ %104, %exiting__7 ] + %99 = icmp sle i64 %98, %97 + br i1 %99, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %100 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %72, i64 %98) + %101 = bitcast i8* %100 to { i1, i64 }** + %102 = load { i1, i64 }*, { i1, i64 }** %101, align 8 + %103 = bitcast { i1, i64 }* %102 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %103, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %104 = add i64 %98, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %72, i32 -1) + %105 = bitcast { i1, i64 }* %73 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %105, i32 -1) + br label %continue__1 + +header__8: ; preds = %exiting__8, %exit__3 + %106 = phi i64 [ 0, %exit__3 ], [ %112, %exiting__8 ] + %107 = icmp sle i64 %106, %63 + br i1 %107, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 %106) + %109 = bitcast i8* %108 to { i64, i64 }** + %110 = load { i64, i64 }*, { i64, i64 }** %109, align 8 + %111 = bitcast { i64, i64 }* %110 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %111, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %112 = add i64 %106, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %60, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %61, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %61, i32 -1) + ret %Array* %60 +} + +define internal void @Microsoft__Quantum__Canon__HY__body(%Qubit* %target) { +entry: + call void @__quantum__qis__h__body(%Qubit* %target) + call void @__quantum__qis__s__body(%Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Canon____QsRef3__ApplyMultiplyControlledAnd____adj(%Array* %controls, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %vars = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) + call void @__quantum__qis__h__body(%Qubit* %target) + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 -2, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %3 = bitcast i8* %2 to %Qubit** + store %Qubit* %target, %Qubit** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, 
i32 1) + %result = call %Result* @__quantum__rt__result_get_one() + %msg = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([43 x i8], [43 x i8]* @15, i32 0, i32 0)) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double 5.000000e-01, %String* %msg, double 1.000000e-10) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + %4 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) + %5 = call i1 @Microsoft__Quantum__Canon__IsResultOne__body(%Result* %4) + call void @__quantum__rt__result_update_reference_count(%Result* %4, i32 -1) + br i1 %5, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %6 = sub i64 %vars, 1 + br label %header__1 + +continue__1: ; preds = %exit__1, %entry + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + ret void + +header__1: ; preds = %exiting__1, %then0__1 + %i = phi i64 [ 0, %then0__1 ], [ %10, %exiting__1 ] + %7 = icmp sle i64 %i, %6 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %start = shl i64 1, %i + %code = call %Array* @Microsoft__Quantum__Canon____QsRef3__GrayCode____body(i64 %i) + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %code) + %9 = sub i64 %8, 1 + br label %header__2 + +exiting__1: ; preds = %exit__5 + %10 = add i64 %i, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + br label %continue__1 + +header__2: ; preds = %exiting__2, %body__1 + %11 = phi i64 [ 0, %body__1 ], [ %17, %exiting__2 ] + %12 = icmp sle i64 %11, %9 + br i1 %12, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %code, i64 %11) + %14 = bitcast i8* %13 to { i64, i64 }** + %15 = load { i64, i64 }*, { i64, i64 }** %14, align 8 + %16 = bitcast { i64, i64 }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %17 = add i64 %11, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %code, i32 1) + %18 = sub i64 %8, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %j = phi i64 [ 0, %exit__2 ], [ %38, %exiting__3 ] + %19 = icmp sle i64 %j, %18 + br i1 %19, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %code, i64 %j) + %21 = bitcast i8* %20 to { i64, i64 }** + %22 = load { i64, i64 }*, { i64, i64 }** %21, align 8 + %23 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %22, i32 0, i32 0 + %offset = load i64, i64* %23, align 4 + %24 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %22, i32 0, i32 1 + %ctrl = load i64, i64* %24, align 4 + %25 = add i64 %start, %offset + %26 = call i64 @Microsoft__Quantum__Canon____QsRef3__Angle____body(i64 %25) + %27 = sub i64 0, %26 + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %i) + %29 = bitcast i8* %28 to %Qubit** + %30 = load %Qubit*, %Qubit** %29, align 8 + call void @Microsoft__Quantum__Intrinsic__RFrac__body(i2 -2, i64 
%27, i64 %vars, %Qubit* %30) + %31 = icmp ne i64 %i, 0 + br i1 %31, label %then0__2, label %continue__2 + +then0__2: ; preds = %body__3 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %ctrl) + %33 = bitcast i8* %32 to %Qubit** + %34 = load %Qubit*, %Qubit** %33, align 8 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controls, i64 %i) + %36 = bitcast i8* %35 to %Qubit** + %37 = load %Qubit*, %Qubit** %36, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %34, %Qubit* %37) + br label %continue__2 + +continue__2: ; preds = %then0__2, %body__3 + br label %exiting__3 + +exiting__3: ; preds = %continue__2 + %38 = add i64 %j, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %39 = sub i64 %8, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %40 = phi i64 [ 0, %exit__3 ], [ %46, %exiting__4 ] + %41 = icmp sle i64 %40, %39 + br i1 %41, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %code, i64 %40) + %43 = bitcast i8* %42 to { i64, i64 }** + %44 = load { i64, i64 }*, { i64, i64 }** %43, align 8 + %45 = bitcast { i64, i64 }* %44 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %46 = add i64 %40, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %code, i32 -1) + %47 = sub i64 %8, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %48 = phi i64 [ 0, %exit__4 ], [ %54, %exiting__5 ] + %49 = icmp sle i64 %48, %47 + br i1 %49, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %code, i64 %48) + %51 = bitcast i8* %50 to { i64, i64 }** + %52 = load { i64, i64 }*, { i64, i64 }** %51, align 8 + %53 = bitcast { i64, i64 }* %52 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %54 = add i64 %48, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %code, i32 -1) + br label %exiting__1 +} + +declare void @__quantum__qis__assertmeasurementprobability__body(%Array*, %Array*, %Result*, double, %String*, double) + +define internal i1 @Microsoft__Quantum__Canon__IsResultOne__body(%Result* %input) { +entry: + %0 = call %Result* @__quantum__rt__result_get_one() + %1 = call i1 @__quantum__rt__result_equal(%Result* %input, %Result* %0) + ret i1 %1 +} + +define internal %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) { +entry: + %result = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %target) + %0 = call %Result* @__quantum__rt__result_get_one() + %1 = call i1 @__quantum__rt__result_equal(%Result* %result, %Result* %0) + br i1 %1, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret %Result* %result +} + +define internal %Array* @Microsoft__Quantum__Arrays___d2cffdff45fe42ce8be877d5eee9264a_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %0 = 
call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %2 = icmp slt i64 %0, %1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = icmp eq i64 %nElements, 0 + br i1 %3, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %4 + +continue__1: ; preds = %condContinue__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %6 = bitcast i8* %5 to i1* + %7 = load i1, i1* %6, align 1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %9 = bitcast i8* %8 to i64* + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, i64 }* getelementptr ({ i1, i64 }, { i1, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i1, i64 }* + %13 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %12, i32 0, i32 1 + store i1 %7, i1* %13, align 1 + store i64 %10, i64* %14, align 4 + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %16 = sub i64 %nElements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %17 = phi i64 [ 0, %continue__1 ], [ %21, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %17) + %20 = bitcast i8* %19 to { i1, i64 }** + store { i1, i64 }* %12, { i1, i64 }** %20, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %15, %Array** %output, align 8 + %22 = sub i64 %nElements, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %23) + %26 = bitcast i8* %25 to { i1, i64 }** + %27 = load { i1, i64 }*, { i1, i64 }** %26, align 8 + %28 = bitcast { i1, i64 }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %30 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %48, %exiting__3 ] + %31 = icmp sle i64 %idxElement, %30 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %32 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = call 
i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %35 = bitcast i8* %34 to i1* + %36 = load i1, i1* %35, align 1 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %38 = bitcast i8* %37 to i64* + %39 = load i64, i64* %38, align 4 + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, i64 }* getelementptr ({ i1, i64 }, { i1, i64 }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i1, i64 }* + %42 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %41, i32 0, i32 1 + store i1 %36, i1* %42, align 1 + store i64 %39, i64* %43, align 4 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxElement) + %45 = bitcast i8* %44 to { i1, i64 }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %46 = load { i1, i64 }*, { i1, i64 }** %45, align 8 + %47 = bitcast { i1, i64 }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { i1, i64 }* %41, { i1, i64 }** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { i1, i64 }** + %56 = load { i1, i64 }*, { i1, i64 }** %55, align 8 + %57 = bitcast { i1, i64 }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret %Array* %49 +} + +define internal i64 @Microsoft__Quantum__Canon___8222e5371d1140e18441af955cbd378d_Snd__body(i1 %0, i64 %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, i64 }* getelementptr ({ i1, i64 }, { i1, i64 }* null, i32 1) to i64)) + %pair = bitcast %Tuple* %2 to { i1, i64 }* + %3 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %pair, i32 0, i32 0 + %4 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %pair, i32 0, i32 1 + store i1 %0, i1* %3, align 1 + store i64 %1, i64* %4, align 4 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret i64 %1 +} + +define internal { i1, i64 }* @Microsoft__Quantum__Arrays___819954bcd82146a6a8342446e1190957_Head__body(%Array* %array) { 
+entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { i1, i64 }** + %6 = load { i1, i64 }*, { i1, i64 }** %5, align 8 + %7 = bitcast { i1, i64 }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %9 = icmp sgt i64 %0, 0 + %10 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([39 x i8], [39 x i8]* @33, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %9, i1 true, %String* %10) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %12 = bitcast i8* %11 to { i1, i64 }** + %13 = load { i1, i64 }*, { i1, i64 }** %12, align 8 + %14 = bitcast { i1, i64 }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 1) + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %16) + %19 = bitcast i8* %18 to { i1, i64 }** + %20 = load { i1, i64 }*, { i1, i64 }** %19, align 8 + %21 = bitcast { i1, i64 }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + ret { i1, i64 }* %13 +} + +define internal %Array* @Microsoft__Quantum__Arrays___4c1a69a66933447c844ced0239fa6495_Filtered__body(%Callable* %predicate, %Array* %array) { +entry: + %idxArray = alloca %Array*, align 8 + %totalFound = alloca i64, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %predicate, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %predicate, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { i1, i64 }** + %6 = load { i1, i64 }*, { i1, i64 }** %5, align 8 + %7 = bitcast { i1, i64 }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + store i64 0, i64* %totalFound, align 4 + %9 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %0) + %10 = sub i64 %0, 1 + br 
label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %11 = phi i64 [ 0, %exit__1 ], [ %15, %exiting__2 ] + %12 = icmp sle i64 %11, %10 + br i1 %12, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %9, i64 %11) + %14 = bitcast i8* %13 to i64* + store i64 0, i64* %14, align 4 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %15 = add i64 %11, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + store %Array* %9, %Array** %idxArray, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 1) + %16 = call %Range @Microsoft__Quantum__Arrays___94a71e0233254bc7929a7d7210bcd75d_IndexRange__body(%Array* %array) + %17 = extractvalue %Range %16, 0 + %18 = extractvalue %Range %16, 1 + %19 = extractvalue %Range %16, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__2 + %20 = icmp sgt i64 %18, 0 + br label %header__3 + +header__3: ; preds = %exiting__3, %preheader__1 + %idxElement = phi i64 [ %17, %preheader__1 ], [ %38, %exiting__3 ] + %21 = icmp sle i64 %idxElement, %19 + %22 = icmp sge i64 %idxElement, %19 + %23 = select i1 %20, i1 %21, i1 %22 + br i1 %23, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idxElement) + %25 = bitcast i8* %24 to { i1, i64 }** + %26 = load { i1, i64 }*, { i1, i64 }** %25, align 8 + %27 = bitcast { i1, i64 }* %26 to %Tuple* + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1 }* getelementptr ({ i1 }, { i1 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %predicate, %Tuple* %27, %Tuple* %28) + %29 = bitcast %Tuple* %28 to { i1 }* + %30 = getelementptr inbounds { i1 }, { i1 }* %29, i32 0, i32 0 + %31 = load i1, i1* %30, align 1 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + br i1 %31, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__3 + %32 = load %Array*, %Array** %idxArray, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = load i64, i64* %totalFound, align 4 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %34) + %36 = bitcast i8* %35 to i64* + store i64 %idxElement, i64* %36, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %idxArray, align 8 + %37 = add i64 %34, 1 + store i64 %37, i64* %totalFound, align 4 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__3 + br label %exiting__3 + +exiting__3: ; preds = %continue__1 + %38 = add i64 %idxElement, %18 + br label %header__3 + +exit__3: ; preds = %header__3 + %39 = load %Array*, %Array** %idxArray, align 8 + %40 = load i64, i64* %totalFound, align 4 + %41 = sub i64 %40, 1 + %42 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %41, 2 + %43 = call %Array* @__quantum__rt__array_slice_1d(%Array* %39, %Range %42, i1 true) + %44 = call %Array* @Microsoft__Quantum__Arrays___350337fdf0114f61a4061047b90dbf85_Subarray__body(%Array* %43, %Array* %array) + call void @__quantum__rt__capture_update_alias_count(%Callable* %predicate, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %predicate, i32 -1) + %45 = sub i64 %0, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %46 = 
phi i64 [ 0, %exit__3 ], [ %52, %exiting__4 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %46) + %49 = bitcast i8* %48 to { i1, i64 }** + %50 = load { i1, i64 }*, { i1, i64 }** %49, align 8 + %51 = bitcast { i1, i64 }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %51, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %52 = add i64 %46, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %39, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %43, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %39, i32 -1) + ret %Array* %44 +} + +define internal void @Microsoft__Quantum__Canon___b950c6e85b1944ae91a2dee4f20f4c18_Fst__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i1, i64 }* + %1 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %0, i32 0, i32 1 + %3 = load i1, i1* %1, align 1 + %4 = load i64, i64* %2, align 4 + %5 = call i1 @Microsoft__Quantum__Canon___b950c6e85b1944ae91a2dee4f20f4c18_Fst__body(i1 %3, i64 %4) + %6 = bitcast %Tuple* %result-tuple to { i1 }* + %7 = getelementptr inbounds { i1 }, { i1 }* %6, i32 0, i32 0 + store i1 %5, i1* %7, align 1 + ret void +} + +define internal i1 @Microsoft__Quantum__Canon___b950c6e85b1944ae91a2dee4f20f4c18_Fst__body(i1 %0, i64 %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, i64 }* getelementptr ({ i1, i64 }, { i1, i64 }* null, i32 1) to i64)) + %pair = bitcast %Tuple* %2 to { i1, i64 }* + %3 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %pair, i32 0, i32 0 + %4 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %pair, i32 0, i32 1 + store i1 %0, i1* %3, align 1 + store i64 %1, i64* %4, align 4 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret i1 %0 +} + +define internal { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef3__MultiplexZCoefficients____body(%Array* %coefficients) { +entry: + %coefficients1 = alloca %Array*, align 8 + %coefficients0 = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %newCoefficientsLength = sdiv i64 %0, 2 + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %newCoefficientsLength) + %2 = sub i64 %newCoefficientsLength, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %3) + %6 = bitcast i8* %5 to double* + store double 0.000000e+00, double* %6, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %1, %Array** %coefficients0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, 
i32 1) + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %newCoefficientsLength) + %9 = sub i64 %newCoefficientsLength, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %14, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 %10) + %13 = bitcast i8* %12 to double* + store double 0.000000e+00, double* %13, align 8 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %14 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + store %Array* %8, %Array** %coefficients1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %15 = sub i64 %newCoefficientsLength, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxCoeff = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %16 = icmp sle i64 %idxCoeff, %15 + br i1 %16, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %17 = load %Array*, %Array** %coefficients0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %18 = call %Array* @__quantum__rt__array_copy(%Array* %17, i1 false) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %idxCoeff) + %20 = bitcast i8* %19 to double* + %21 = load double, double* %20, align 8 + %22 = add i64 %idxCoeff, %newCoefficientsLength + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %22) + %24 = bitcast i8* %23 to double* + %25 = load double, double* %24, align 8 + %26 = fadd double %21, %25 + %27 = fmul double 5.000000e-01, %26 + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %idxCoeff) + %29 = bitcast i8* %28 to double* + store double %27, double* %29, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + store %Array* %18, %Array** %coefficients0, align 8 + %30 = load %Array*, %Array** %coefficients1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %30, i32 -1) + %31 = call %Array* @__quantum__rt__array_copy(%Array* %30, i1 false) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %idxCoeff) + %33 = bitcast i8* %32 to double* + %34 = load double, double* %33, align 8 + %35 = add i64 %idxCoeff, %newCoefficientsLength + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %35) + %37 = bitcast i8* %36 to double* + %38 = load double, double* %37, align 8 + %39 = fsub double %34, %38 + %40 = fmul double 5.000000e-01, %39 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %31, i64 %idxCoeff) + %42 = bitcast i8* %41 to double* + %43 = load double, double* %42, align 8 + store double %40, double* %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %31, i32 1) + store %Array* %31, %Array** %coefficients1, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %30, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %idxCoeff, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %45 = load %Array*, %Array** %coefficients0, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %45, i32 1) + %46 = load %Array*, %Array** %coefficients1, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* 
%46, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { %Array*, %Array* }* + %49 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %48, i32 0, i32 1 + store %Array* %45, %Array** %49, align 8 + store %Array* %46, %Array** %50, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %45, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %46, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %45, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %46, i32 -1) + ret { %Array*, %Array* }* %48 +} + +define internal double @Microsoft__Quantum__Canon____QsRef3__TrotterStepSize____body(i64 %order) { +entry: + %0 = sitofp i64 %order to double + %1 = fsub double %0, 1.000000e+00 + %2 = fdiv double 1.000000e+00, %1 + %3 = call double @Microsoft__Quantum__Math__PowD__body(double 4.000000e+00, double %2) + %4 = fsub double 4.000000e+00, %3 + %5 = fdiv double 1.000000e+00, %4 + ret double %5 +} + +define internal void @Microsoft__Quantum__Canon__CZ__body(%Qubit* %control, %Qubit* %target) { +entry: + %__controlQubits__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__controlQubits__, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, %Qubit** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__body(i2 %pauli, %Qubit* %target) { +entry: + %0 = icmp eq i2 %pauli, 1 + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +test1__1: ; preds = %entry + %1 = icmp eq i2 %pauli, -1 + br i1 %1, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__qis__y__body(%Qubit* %target) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %2 = icmp eq i2 %pauli, -2 + br i1 %2, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__qis__z__body(%Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__adj(i2 %pauli, %Qubit* %target) { +entry: + %0 = icmp eq i2 %pauli, 1 + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +test1__1: ; preds = %entry + %1 = icmp eq i2 %pauli, -1 + br i1 %1, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__qis__y__body(%Qubit* %target) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %2 = icmp eq i2 %pauli, -2 + br i1 %2, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__qis__z__body(%Qubit* %target) + br 
label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__ctl(%Array* %__controlQubits__, { i2, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = icmp eq i2 %pauli, 1 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %4 = icmp eq i2 %pauli, -1 + br i1 %4, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %5 = icmp eq i2 %pauli, -2 + br i1 %5, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__ctladj(%Array* %__controlQubits__, { i2, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = icmp eq i2 %pauli, 1 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %4 = icmp eq i2 %pauli, -1 + br i1 %4, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %5 = icmp eq i2 %pauli, -2 + br i1 %5, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauli__body(%Array* %pauli, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyP__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___bb1c4270ad5b4be0bab2024b59f8dcff_Zipped__body(%Array* %pauli, %Array* %target) + call void @Microsoft__Quantum__Canon___df1542a7adba4b8098971b98d88b9125_ApplyToEachCA__body(%Callable* %0, %Array* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { i2, %Qubit* }** + %8 = load { i2, %Qubit* }*, { i2, %Qubit* }** %7, align 8 + %9 = bitcast { i2, %Qubit* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___df1542a7adba4b8098971b98d88b9125_ApplyToEachCA__body(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %register) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %2) + %5 = bitcast i8* %4 to { i2, %Qubit* }** + %6 = load { i2, %Qubit* }*, { i2, %Qubit* }** %5, align 8 + %7 = bitcast { i2, %Qubit* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %9 = call %Range @Microsoft__Quantum__Arrays___efb0462d99e445f898c5269230c6127d_IndexRange__body(%Array* %register) + %10 = extractvalue %Range %9, 0 + %11 = extractvalue %Range %9, 1 + %12 = extractvalue %Range %9, 2 + br 
label %preheader__1 + +preheader__1: ; preds = %exit__1 + %13 = icmp sgt i64 %11, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idxQubit = phi i64 [ %10, %preheader__1 ], [ %21, %exiting__2 ] + %14 = icmp sle i64 %idxQubit, %12 + %15 = icmp sge i64 %idxQubit, %12 + %16 = select i1 %13, i1 %14, i1 %15 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %18 = bitcast i8* %17 to { i2, %Qubit* }** + %19 = load { i2, %Qubit* }*, { i2, %Qubit* }** %18, align 8 + %20 = bitcast { i2, %Qubit* }* %19 to %Tuple* + call void @__quantum__rt__callable_invoke(%Callable* %singleElementOperation, %Tuple* %20, %Tuple* null) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %idxQubit, %11 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + %22 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %23 = phi i64 [ 0, %exit__2 ], [ %29, %exiting__3 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %23) + %26 = bitcast i8* %25 to { i2, %Qubit* }** + %27 = load { i2, %Qubit* }*, { i2, %Qubit* }** %26, align 8 + %28 = bitcast { i2, %Qubit* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %29 = add i64 %23, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i2, %Qubit* }* + %1 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 1 + %3 = load i2, i2* %1, align 1 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__body(i2 %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i2, %Qubit* }* + %1 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 1 + %3 = load i2, i2* %1, align 1 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__adj(i2 %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i2, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i2, %Qubit* }*, { i2, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__ctl(%Array* 
%3, { i2, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i2, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i2, %Qubit* }*, { i2, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__ctladj(%Array* %3, { i2, %Qubit* }* %4) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___bb1c4270ad5b4be0bab2024b59f8dcff_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %2 = icmp slt i64 %0, %1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = icmp eq i64 %nElements, 0 + br i1 %3, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %4 + +continue__1: ; preds = %condContinue__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %6 = bitcast i8* %5 to i2* + %7 = load i2, i2* %6, align 1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, %Qubit* }* getelementptr ({ i2, %Qubit* }, { i2, %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i2, %Qubit* }* + %13 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %12, i32 0, i32 1 + store i2 %7, i2* %13, align 1 + store %Qubit* %10, %Qubit** %14, align 8 + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %16 = sub i64 %nElements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %17 = phi i64 [ 0, %continue__1 ], [ %21, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %17) + %20 = bitcast i8* %19 to { i2, %Qubit* }** + store { i2, %Qubit* }* %12, { i2, %Qubit* }** %20, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %15, %Array** %output, align 8 + %22 = sub i64 %nElements, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %24 
= icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %23) + %26 = bitcast i8* %25 to { i2, %Qubit* }** + %27 = load { i2, %Qubit* }*, { i2, %Qubit* }** %26, align 8 + %28 = bitcast { i2, %Qubit* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %30 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %48, %exiting__3 ] + %31 = icmp sle i64 %idxElement, %30 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %32 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %35 = bitcast i8* %34 to i2* + %36 = load i2, i2* %35, align 1 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %38 = bitcast i8* %37 to %Qubit** + %39 = load %Qubit*, %Qubit** %38, align 8 + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, %Qubit* }* getelementptr ({ i2, %Qubit* }, { i2, %Qubit* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i2, %Qubit* }* + %42 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %41, i32 0, i32 1 + store i2 %36, i2* %42, align 1 + store %Qubit* %39, %Qubit** %43, align 8 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxElement) + %45 = bitcast i8* %44 to { i2, %Qubit* }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %46 = load { i2, %Qubit* }*, { i2, %Qubit* }** %45, align 8 + %47 = bitcast { i2, %Qubit* }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { i2, %Qubit* }* %41, { i2, %Qubit* }** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { i2, %Qubit* }** + %56 = load { i2, %Qubit* }*, { i2, %Qubit* }** %55, align 8 + %57 = bitcast { i2, %Qubit* }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br 
label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret %Array* %49 +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauli__adj(%Array* %pauli, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyP__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___bb1c4270ad5b4be0bab2024b59f8dcff_Zipped__body(%Array* %pauli, %Array* %target) + call void @Microsoft__Quantum__Canon___df1542a7adba4b8098971b98d88b9125_ApplyToEachCA__adj(%Callable* %0, %Array* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { i2, %Qubit* }** + %8 = load { i2, %Qubit* }*, { i2, %Qubit* }** %7, align 8 + %9 = bitcast { i2, %Qubit* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___df1542a7adba4b8098971b98d88b9125_ApplyToEachCA__adj(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %register) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %2) + %5 = bitcast i8* %4 to { i2, %Qubit* }** + %6 = load { i2, %Qubit* }*, { i2, %Qubit* }** %5, align 8 + %7 = bitcast { i2, %Qubit* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %9 = call %Range @Microsoft__Quantum__Arrays___efb0462d99e445f898c5269230c6127d_IndexRange__body(%Array* %register) + %10 = extractvalue %Range %9, 0 + %11 = extractvalue %Range %9, 1 + %12 = 
extractvalue %Range %9, 2 + %13 = sub i64 %12, %10 + %14 = sdiv i64 %13, %11 + %15 = mul i64 %11, %14 + %16 = add i64 %10, %15 + %17 = sub i64 0, %11 + %18 = insertvalue %Range zeroinitializer, i64 %16, 0 + %19 = insertvalue %Range %18, i64 %17, 1 + %20 = insertvalue %Range %19, i64 %10, 2 + %21 = extractvalue %Range %20, 0 + %22 = extractvalue %Range %20, 1 + %23 = extractvalue %Range %20, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %24 = icmp sgt i64 %22, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %21, %preheader__1 ], [ %33, %exiting__2 ] + %25 = icmp sle i64 %__qsVar0__idxQubit__, %23 + %26 = icmp sge i64 %__qsVar0__idxQubit__, %23 + %27 = select i1 %24, i1 %25, i1 %26 + br i1 %27, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %28 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %28) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %30 = bitcast i8* %29 to { i2, %Qubit* }** + %31 = load { i2, %Qubit* }*, { i2, %Qubit* }** %30, align 8 + %32 = bitcast { i2, %Qubit* }* %31 to %Tuple* + call void @__quantum__rt__callable_invoke(%Callable* %28, %Tuple* %32, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %28, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %33 = add i64 %__qsVar0__idxQubit__, %22 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + %34 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %41, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %35) + %38 = bitcast i8* %37 to { i2, %Qubit* }** + %39 = load { i2, %Qubit* }*, { i2, %Qubit* }** %38, align 8 + %40 = bitcast { i2, %Qubit* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %41 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauli__ctl(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %pauli = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 1) + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyP__FunctionTable, 
[2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Array* @Microsoft__Quantum__Arrays___bb1c4270ad5b4be0bab2024b59f8dcff_Zipped__body(%Array* %pauli, %Array* %target) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + store %Callable* %3, %Callable** %7, align 8 + store %Array* %4, %Array** %8, align 8 + call void @Microsoft__Quantum__Canon___df1542a7adba4b8098971b98d88b9125_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %6) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + %9 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %10 = sub i64 %9, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %11 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %12 = icmp sle i64 %11, %10 + br i1 %12, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %11) + %14 = bitcast i8* %13 to { i2, %Qubit* }** + %15 = load { i2, %Qubit* }*, { i2, %Qubit* }** %14, align 8 + %16 = bitcast { i2, %Qubit* }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %11, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___df1542a7adba4b8098971b98d88b9125_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %register) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %5) + %8 = bitcast i8* %7 to { i2, %Qubit* }** + %9 = load { i2, %Qubit* }*, { i2, %Qubit* }** %8, align 8 + %10 = bitcast { i2, %Qubit* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 
1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %12 = call %Range @Microsoft__Quantum__Arrays___efb0462d99e445f898c5269230c6127d_IndexRange__body(%Array* %register) + %13 = extractvalue %Range %12, 0 + %14 = extractvalue %Range %12, 1 + %15 = extractvalue %Range %12, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %16 = icmp sgt i64 %14, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idxQubit = phi i64 [ %13, %preheader__1 ], [ %29, %exiting__2 ] + %17 = icmp sle i64 %idxQubit, %15 + %18 = icmp sge i64 %idxQubit, %15 + %19 = select i1 %16, i1 %17, i1 %18 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %20, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %20) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %22 = bitcast i8* %21 to { i2, %Qubit* }** + %23 = load { i2, %Qubit* }*, { i2, %Qubit* }** %22, align 8 + %24 = bitcast { i2, %Qubit* }* %23 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 1) + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i2, %Qubit* }* }* getelementptr ({ %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { %Array*, { i2, %Qubit* }* }* + %27 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %26, i32 0, i32 0 + %28 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %26, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %27, align 8 + store { i2, %Qubit* }* %23, { i2, %Qubit* }** %28, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %25, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %20, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %20, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %idxQubit, %14 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + %30 = sub i64 %3, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %31 = phi i64 [ 0, %exit__2 ], [ %37, %exiting__3 ] + %32 = icmp sle i64 %31, %30 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %31) + %34 = bitcast i8* %33 to { i2, %Qubit* }** + %35 = load { i2, %Qubit* }*, { i2, %Qubit* }** %34, align 8 + %36 = bitcast { i2, %Qubit* }* %35 to %Tuple* + call 
void @__quantum__rt__tuple_update_alias_count(%Tuple* %36, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %37 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauli__ctladj(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %pauli = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 1) + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyP__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Array* @Microsoft__Quantum__Arrays___bb1c4270ad5b4be0bab2024b59f8dcff_Zipped__body(%Array* %pauli, %Array* %target) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + store %Callable* %3, %Callable** %7, align 8 + store %Array* %4, %Array** %8, align 8 + call void @Microsoft__Quantum__Canon___df1542a7adba4b8098971b98d88b9125_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Array* }* %6) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + %9 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %10 = sub i64 %9, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %11 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %12 = icmp sle i64 %11, %10 + br i1 %12, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %11) + %14 = bitcast i8* %13 to { i2, %Qubit* }** + %15 = load { i2, %Qubit* }*, { i2, %Qubit* }** %14, align 8 + %16 = bitcast { i2, %Qubit* }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %11, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___df1542a7adba4b8098971b98d88b9125_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr 
inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %register) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %5) + %8 = bitcast i8* %7 to { i2, %Qubit* }** + %9 = load { i2, %Qubit* }*, { i2, %Qubit* }** %8, align 8 + %10 = bitcast { i2, %Qubit* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %12 = call %Range @Microsoft__Quantum__Arrays___efb0462d99e445f898c5269230c6127d_IndexRange__body(%Array* %register) + %13 = extractvalue %Range %12, 0 + %14 = extractvalue %Range %12, 1 + %15 = extractvalue %Range %12, 2 + %16 = sub i64 %15, %13 + %17 = sdiv i64 %16, %14 + %18 = mul i64 %14, %17 + %19 = add i64 %13, %18 + %20 = sub i64 0, %14 + %21 = insertvalue %Range zeroinitializer, i64 %19, 0 + %22 = insertvalue %Range %21, i64 %20, 1 + %23 = insertvalue %Range %22, i64 %13, 2 + %24 = extractvalue %Range %23, 0 + %25 = extractvalue %Range %23, 1 + %26 = extractvalue %Range %23, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %27 = icmp sgt i64 %25, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %24, %preheader__1 ], [ %40, %exiting__2 ] + %28 = icmp sle i64 %__qsVar0__idxQubit__, %26 + %29 = icmp sge i64 %__qsVar0__idxQubit__, %26 + %30 = select i1 %27, i1 %28, i1 %29 + br i1 %30, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %31 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %31) + call void @__quantum__rt__callable_make_controlled(%Callable* %31) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %33 = bitcast i8* %32 to { i2, %Qubit* }** + %34 = load { i2, %Qubit* }*, { i2, %Qubit* }** %33, align 8 + %35 = bitcast { i2, %Qubit* }* %34 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i2, %Qubit* }* }* getelementptr ({ %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { %Array*, { i2, %Qubit* }* }* + %38 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, 
%Qubit* }* }* %37, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %38, align 8 + store { i2, %Qubit* }* %34, { i2, %Qubit* }** %39, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %31, %Tuple* %36, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %40 = add i64 %__qsVar0__idxQubit__, %25 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + %41 = sub i64 %3, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %42 = phi i64 [ 0, %exit__2 ], [ %48, %exiting__3 ] + %43 = icmp sle i64 %42, %41 + br i1 %43, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %42) + %45 = bitcast i8* %44 to { i2, %Qubit* }** + %46 = load { i2, %Qubit* }*, { i2, %Qubit* }** %45, align 8 + %47 = bitcast { i2, %Qubit* }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %42, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 %pauli, i1 %bitApply, %Array* %bits, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %nBits = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %0 = call %Array* @Microsoft__Quantum__Arrays___6cd6404b5119404bb0bed9c3be2cb761_Zipped__body(%Array* %bits, %Array* %qubits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %3) + %6 = bitcast i8* %5 to { i1, %Qubit* }** + %7 = load { i1, %Qubit* }*, { i1, %Qubit* }** %6, align 8 + %8 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %7, i32 0, i32 0 + %bit = load i1, i1* %8, align 1 + %9 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %7, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %9, align 8 + %10 = icmp eq i1 %bit, %bitApply + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + call void @Microsoft__Quantum__Canon__ApplyP__body(i2 %pauli, %Qubit* %qubit) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %11 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void 
@__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %12 = sub i64 %1, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %13) + %16 = bitcast i8* %15 to { i1, %Qubit* }** + %17 = load { i1, %Qubit* }*, { i1, %Qubit* }** %16, align 8 + %18 = bitcast { i1, %Qubit* }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___6cd6404b5119404bb0bed9c3be2cb761_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %2 = icmp slt i64 %0, %1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = icmp eq i64 %nElements, 0 + br i1 %3, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %4 + +continue__1: ; preds = %condContinue__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %6 = bitcast i8* %5 to i1* + %7 = load i1, i1* %6, align 1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, %Qubit* }* getelementptr ({ i1, %Qubit* }, { i1, %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i1, %Qubit* }* + %13 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %12, i32 0, i32 1 + store i1 %7, i1* %13, align 1 + store %Qubit* %10, %Qubit** %14, align 8 + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %16 = sub i64 %nElements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %17 = phi i64 [ 0, %continue__1 ], [ %21, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %17) + %20 = bitcast i8* %19 to { i1, %Qubit* }** + store { i1, %Qubit* }* %12, { i1, %Qubit* }** %20, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %17, 1 + br label 
%header__1 + +exit__1: ; preds = %header__1 + store %Array* %15, %Array** %output, align 8 + %22 = sub i64 %nElements, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %23) + %26 = bitcast i8* %25 to { i1, %Qubit* }** + %27 = load { i1, %Qubit* }*, { i1, %Qubit* }** %26, align 8 + %28 = bitcast { i1, %Qubit* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %30 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %48, %exiting__3 ] + %31 = icmp sle i64 %idxElement, %30 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %32 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %35 = bitcast i8* %34 to i1* + %36 = load i1, i1* %35, align 1 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %38 = bitcast i8* %37 to %Qubit** + %39 = load %Qubit*, %Qubit** %38, align 8 + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, %Qubit* }* getelementptr ({ i1, %Qubit* }, { i1, %Qubit* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i1, %Qubit* }* + %42 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %41, i32 0, i32 1 + store i1 %36, i1* %42, align 1 + store %Qubit* %39, %Qubit** %43, align 8 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxElement) + %45 = bitcast i8* %44 to { i1, %Qubit* }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %46 = load { i1, %Qubit* }*, { i1, %Qubit* }** %45, align 8 + %47 = bitcast { i1, %Qubit* }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { i1, %Qubit* }* %41, { i1, %Qubit* }** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%49, i64 %52) + %55 = bitcast i8* %54 to { i1, %Qubit* }** + %56 = load { i1, %Qubit* }*, { i1, %Qubit* }** %55, align 8 + %57 = bitcast { i1, %Qubit* }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret %Array* %49 +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 %pauli, i1 %bitApply, %Array* %bits, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %__qsVar0__nBits__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %0 = call %Array* @Microsoft__Quantum__Arrays___6cd6404b5119404bb0bed9c3be2cb761_Zipped__body(%Array* %bits, %Array* %qubits) + %1 = call %Array* @Microsoft__Quantum__Arrays___6cd6404b5119404bb0bed9c3be2cb761_Zipped__body(%Array* %bits, %Array* %qubits) + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + %4 = insertvalue %Range zeroinitializer, i64 %3, 0 + %5 = insertvalue %Range %4, i64 -1, 1 + %6 = insertvalue %Range %5, i64 0, 2 + %7 = call %Array* @__quantum__rt__array_slice_1d(%Array* %0, %Range %6, i1 true) + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %7) + %9 = sub i64 %8, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %10 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %7, i64 %10) + %13 = bitcast i8* %12 to { i1, %Qubit* }** + %14 = load { i1, %Qubit* }*, { i1, %Qubit* }** %13, align 8 + %15 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %14, i32 0, i32 0 + %__qsVar1__bit__ = load i1, i1* %15, align 1 + %16 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %14, i32 0, i32 1 + %__qsVar2__qubit__ = load %Qubit*, %Qubit** %16, align 8 + %17 = icmp eq i1 %__qsVar1__bit__, %bitApply + br i1 %17, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + call void @Microsoft__Quantum__Canon__ApplyP__adj(i2 %pauli, %Qubit* %__qsVar2__qubit__) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %18 = add i64 %10, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %19 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %20 = sub i64 %19, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %21) + %24 = bitcast i8* %23 to { i1, %Qubit* }** + %25 = load { i1, %Qubit* }*, { i1, %Qubit* }** %24, align 8 + %26 = bitcast { i1, %Qubit* }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %21, 1 + br label 
%header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + %28 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %29) + %32 = bitcast i8* %31 to { i1, %Qubit* }** + %33 = load { i1, %Qubit* }*, { i1, %Qubit* }** %32, align 8 + %34 = bitcast { i1, %Qubit* }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__ctl(%Array* %__controlQubits__, { i2, i1, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 1 + %bitApply = load i1, i1* %2, align 1 + %3 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 2 + %bits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %4 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 3 + %qubits = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %nBits = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %5 = call %Array* @Microsoft__Quantum__Arrays___6cd6404b5119404bb0bed9c3be2cb761_Zipped__body(%Array* %bits, %Array* %qubits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5) + %7 = sub i64 %6, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %8 = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] + %9 = icmp sle i64 %8, %7 + br i1 %9, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %5, i64 %8) + %11 = bitcast i8* %10 to { i1, %Qubit* }** + %12 = load { i1, %Qubit* }*, { i1, %Qubit* }** %11, align 8 + %13 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %12, i32 0, i32 0 + %bit = load i1, i1* %13, align 1 + %14 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %12, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %14, align 8 + %15 = icmp eq i1 %bit, %bitApply + br i1 %15, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, %Qubit* }* getelementptr ({ i2, %Qubit* }, { i2, %Qubit* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { i2, %Qubit* }* + %18 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %17, i32 0, i32 1 + store i2 %pauli, i2* %18, align 1 + store %Qubit* %qubit, %Qubit** %19, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__ctl(%Array* %__controlQubits__, { 
i2, %Qubit* }* %17) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %20 = add i64 %8, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %21 = sub i64 %6, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %22 = phi i64 [ 0, %exit__1 ], [ %28, %exiting__2 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %5, i64 %22) + %25 = bitcast i8* %24 to { i1, %Qubit* }** + %26 = load { i1, %Qubit* }*, { i1, %Qubit* }** %25, align 8 + %27 = bitcast { i1, %Qubit* }* %26 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %28 = add i64 %22, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__ctladj(%Array* %__controlQubits__, { i2, i1, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 1 + %bitApply = load i1, i1* %2, align 1 + %3 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 2 + %bits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %4 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 3 + %qubits = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %__qsVar0__nBits__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %5 = call %Array* @Microsoft__Quantum__Arrays___6cd6404b5119404bb0bed9c3be2cb761_Zipped__body(%Array* %bits, %Array* %qubits) + %6 = call %Array* @Microsoft__Quantum__Arrays___6cd6404b5119404bb0bed9c3be2cb761_Zipped__body(%Array* %bits, %Array* %qubits) + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + %9 = insertvalue %Range zeroinitializer, i64 %8, 0 + %10 = insertvalue %Range %9, i64 -1, 1 + %11 = insertvalue %Range %10, i64 0, 2 + %12 = call %Array* @__quantum__rt__array_slice_1d(%Array* %5, %Range %11, i1 true) + %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %12) + %14 = sub i64 %13, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %15 = phi i64 [ 0, %entry ], [ %27, %exiting__1 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %15) + %18 = bitcast i8* %17 to { i1, %Qubit* }** + %19 = load { i1, %Qubit* }*, { i1, %Qubit* }** %18, align 8 + %20 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %19, i32 0, i32 0 + 
%__qsVar1__bit__ = load i1, i1* %20, align 1 + %21 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %19, i32 0, i32 1 + %__qsVar2__qubit__ = load %Qubit*, %Qubit** %21, align 8 + %22 = icmp eq i1 %__qsVar1__bit__, %bitApply + br i1 %22, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, %Qubit* }* getelementptr ({ i2, %Qubit* }, { i2, %Qubit* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { i2, %Qubit* }* + %25 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %24, i32 0, i32 1 + store i2 %pauli, i2* %25, align 1 + store %Qubit* %__qsVar2__qubit__, %Qubit** %26, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__ctladj(%Array* %__controlQubits__, { i2, %Qubit* }* %24) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %27 = add i64 %15, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %28 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5) + %29 = sub i64 %28, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %30 = phi i64 [ 0, %exit__1 ], [ %36, %exiting__2 ] + %31 = icmp sle i64 %30, %29 + br i1 %31, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %5, i64 %30) + %33 = bitcast i8* %32 to { i1, %Qubit* }** + %34 = load { i1, %Qubit* }*, { i1, %Qubit* }** %33, align 8 + %35 = bitcast { i1, %Qubit* }* %34 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %36 = add i64 %30, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + %37 = sub i64 %7, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %38 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %38) + %41 = bitcast i8* %40 to { i1, %Qubit* }** + %42 = load { i1, %Qubit* }*, { i1, %Qubit* }** %41, align 8 + %43 = bitcast { i1, %Qubit* }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %38, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__body(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %qubits__1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* 
%qubits__1, i32 1) + %1 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = call i1 @Microsoft__Quantum__Arrays___4b7f49d75c874a3d9275080e156c5c7c_IsEmpty__body(%Array* %qubits__1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @16, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__fail(%String* %3) + unreachable + +continue__1: ; preds = %entry + %4 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1) + %5 = trunc i64 %4 to i32 + %6 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %5) + %7 = fptosi double %6 to i64 + %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___363cc02fdfc747c5803543b2d61fb3a1_Padded__body(i64 %7, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1) + %8 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef3__MultiplexZCoefficients____body(%Array* %coefficientsPadded) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %coefficients0 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + %coefficients1 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1) + %11 = call %Array* @Microsoft__Quantum__Arrays___11e60deb067d435786055b3275e916c8_Most__body(%Array* %qubits__1) + %12 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %11) + %13 = call %Qubit* @Microsoft__Quantum__Arrays___9d7834b9a7ce4d8cb1a6bd58bdfd2736_Tail__body(%Array* %qubits__1) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients1, { %Array* }* %12, %Qubit* %13) + %14 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded) + %15 = icmp eq i64 %14, 2 + br i1 %15, label %then0__2, label %else__1 + +then0__2: ; preds = %continue__1 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0) + %17 = bitcast i8* %16 to double* + %18 = load double, double* %17, align 8 + %19 = call double @Microsoft__Quantum__Math__AbsD__body(double %18) + %20 = fcmp ogt double %19, %tolerance + br i1 %20, label %then0__3, label %continue__3 + +then0__3: ; preds = %then0__2 + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %22 = bitcast i8* %21 to i2* + store i2 0, i2* %22, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0) + %24 = bitcast i8* %23 to double* + %25 = load double, double* %24, align 8 + %theta = fmul double 1.000000e+00, %25 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %theta, %Array* %qubits__1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %then0__2 + br label %continue__2 + +else__1: ; preds = %continue__1 + %26 = call %Array* @Microsoft__Quantum__Arrays___11e60deb067d435786055b3275e916c8_Most__body(%Array* %qubits__1) + %27 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %26) + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__body(double %tolerance, %Array* %coefficients0, { %Array* }* %27) + %28 = getelementptr inbounds { %Array* }, { %Array* }* %27, i32 0, i32 0 + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %26, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1) + %30 = bitcast { %Array* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %continue__3 + %31 = getelementptr inbounds { %Array* }, { %Array* }* %12, i32 0, i32 0 + %32 = load %Array*, %Array** %31, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1) + %33 = bitcast { %Array*, %Array* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + %34 = bitcast { %Array* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + ret void +} + +define internal i1 @Microsoft__Quantum__Arrays___4b7f49d75c874a3d9275080e156c5c7c_IsEmpty__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = icmp eq i64 %0, 0 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret i1 %1 +} + +define internal %Array* @Microsoft__Quantum__Arrays___363cc02fdfc747c5803543b2d61fb3a1_Padded__body(i64 %nElementsTotal, double %defaultElement, %Array* %inputArray) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 1) + %nElementsInitial = call i64 @__quantum__rt__array_get_size_1d(%Array* %inputArray) + %nAbsElementsTotal = call i64 @Microsoft__Quantum__Math__AbsI__body(i64 %nElementsTotal) + %0 = icmp sge i64 %nAbsElementsTotal, %nElementsInitial + %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([71 x i8], [71 x i8]* @35, i32 0, i32 0)) + call void 
@Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %0, i1 true, %String* %1) + %nElementsPad = sub i64 %nAbsElementsTotal, %nElementsInitial + %padArray = call %Array* @Microsoft__Quantum__Arrays___aadf45b9686643c385c8db16f19e226a_ConstantArray__body(i64 %nElementsPad, double %defaultElement) + call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 1) + %2 = icmp sge i64 %nElementsTotal, 0 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %3 = call %Array* @__quantum__rt__array_concatenate(%Array* %padArray, %Array* %inputArray) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + br label %condContinue__1 + +condFalse__1: ; preds = %entry + %4 = call %Array* @__quantum__rt__array_concatenate(%Array* %inputArray, %Array* %padArray) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %5 = phi %Array* [ %3, %condTrue__1 ], [ %4, %condFalse__1 ] + call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %padArray, i32 -1) + ret %Array* %5 +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients, { %Array* }* %control, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %4 = trunc i64 %3 to i32 + %5 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %4) + %6 = fptosi double %5 to i64 + %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___363cc02fdfc747c5803543b2d61fb3a1_Padded__body(i64 %6, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1) + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded) + %8 = icmp eq i64 %7, 1 + br i1 %8, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 0) + %10 = bitcast i8* %9 to double* + %11 = load double, double* %10, align 8 + %12 = call double @Microsoft__Quantum__Math__AbsD__body(double %11) + %13 = fcmp ogt double %12, %tolerance + br i1 %13, label %then0__2, label %continue__2 + +then0__2: ; preds = %then0__1 + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %15 = bitcast i8* %14 to i2* + store i2 -2, i2* %15, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 0) + %17 = bitcast i8* %16 to double* + %theta = load double, 
double* %17, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %19 = bitcast i8* %18 to %Qubit** + store %Qubit* %target, %Qubit** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + br label %continue__2 + +continue__2: ; preds = %then0__2, %then0__1 + br label %continue__1 + +else__1: ; preds = %entry + %20 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef3__MultiplexZCoefficients____body(%Array* %coefficientsPadded) + %21 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 0 + %coefficients0 = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1) + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 1 + %coefficients1 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1) + %23 = call %Array* @Microsoft__Quantum__Arrays___11e60deb067d435786055b3275e916c8_Most__body(%Array* %1) + %24 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %23) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients0, { %Array* }* %24, %Qubit* %target) + %25 = call i1 @Microsoft__Quantum__Canon____QsRef3__AnyOutsideToleranceD____body(double %tolerance, %Array* %coefficients1) + br i1 %25, label %then0__3, label %continue__3 + +then0__3: ; preds = %else__1 + %26 = call %Qubit* @Microsoft__Quantum__Arrays___9d7834b9a7ce4d8cb1a6bd58bdfd2736_Tail__body(%Array* %1) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %26, %Qubit* %target) + %27 = call %Array* @Microsoft__Quantum__Arrays___11e60deb067d435786055b3275e916c8_Most__body(%Array* %1) + %28 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %27) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients1, { %Array* }* %28, %Qubit* %target) + %29 = call %Qubit* @Microsoft__Quantum__Arrays___9d7834b9a7ce4d8cb1a6bd58bdfd2736_Tail__body(%Array* %1) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %29, %Qubit* %target) + %30 = getelementptr inbounds { %Array* }, { %Array* }* %28, i32 0, i32 0 + %31 = load %Array*, %Array** %30, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 -1) + %32 = bitcast { %Array* }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %else__1 + %33 = getelementptr inbounds { %Array* }, { %Array* }* %24, i32 0, i32 0 + %34 = load %Array*, %Array** %33, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1) + %35 = bitcast { %Array*, %Array* }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + %36 = bitcast { %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %continue__1 + +continue__1: ; preds = %continue__3, %continue__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___11e60deb067d435786055b3275e916c8_Most__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 2 + %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2 + %3 = call %Array* @__quantum__rt__array_slice_1d(%Array* %array, %Range %2, i1 true) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + ret %Array* %3 +} + +define internal %Qubit* @Microsoft__Quantum__Arrays___9d7834b9a7ce4d8cb1a6bd58bdfd2736_Tail__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = icmp sgt i64 %0, 0 + %2 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([39 x i8], [39 x i8]* @33, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %1, i1 true, %String* %2) + %3 = sub i64 %0, 1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %3) + %5 = bitcast i8* %4 to %Qubit** + %6 = load %Qubit*, %Qubit** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + ret %Qubit* %6 +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__adj(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %qubits__1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + %1 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = call i1 @Microsoft__Quantum__Arrays___4b7f49d75c874a3d9275080e156c5c7c_IsEmpty__body(%Array* %qubits__1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @16, i32 0, i32 0)) + call void 
@__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__fail(%String* %3) + unreachable + +continue__1: ; preds = %entry + %4 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1) + %5 = trunc i64 %4 to i32 + %6 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %5) + %7 = fptosi double %6 to i64 + %__qsVar0__coefficientsPadded__ = call %Array* @Microsoft__Quantum__Arrays___363cc02fdfc747c5803543b2d61fb3a1_Padded__body(i64 %7, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1) + %8 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef3__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %__qsVar1__coefficients0__ = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + %__qsVar2__coefficients1__ = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsPadded__) + %12 = icmp eq i64 %11, 2 + br i1 %12, label %then0__2, label %else__1 + +then0__2: ; preds = %continue__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0) + %14 = bitcast i8* %13 to double* + %15 = load double, double* %14, align 8 + %16 = call double @Microsoft__Quantum__Math__AbsD__body(double %15) + %17 = fcmp ogt double %16, %tolerance + br i1 %17, label %then0__3, label %continue__3 + +then0__3: ; preds = %then0__2 + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %19 = bitcast i8* %18 to i2* + store i2 0, i2* %19, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0) + %21 = bitcast i8* %20 to double* + %22 = load double, double* %21, align 8 + %theta = fmul double 1.000000e+00, %22 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %theta, %Array* %qubits__1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %then0__2 + br label %continue__2 + +else__1: ; preds = %continue__1 + %23 = call %Array* @Microsoft__Quantum__Arrays___11e60deb067d435786055b3275e916c8_Most__body(%Array* %qubits__1) + %24 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %23) + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__adj(double %tolerance, %Array* %__qsVar1__coefficients0__, { %Array* }* %24) + %25 = getelementptr inbounds { %Array* }, { %Array* }* %24, i32 0, i32 0 + %26 = load %Array*, %Array** %25, align 8 + call 
void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %26, i32 -1) + %27 = bitcast { %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %continue__3 + %28 = call %Array* @Microsoft__Quantum__Arrays___11e60deb067d435786055b3275e916c8_Most__body(%Array* %qubits__1) + %29 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %28) + %30 = call %Qubit* @Microsoft__Quantum__Arrays___9d7834b9a7ce4d8cb1a6bd58bdfd2736_Tail__body(%Array* %qubits__1) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar2__coefficients1__, { %Array* }* %29, %Qubit* %30) + %31 = getelementptr inbounds { %Array* }, { %Array* }* %29, i32 0, i32 0 + %32 = load %Array*, %Array** %31, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1) + %33 = bitcast { %Array*, %Array* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + %34 = bitcast { %Array* }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %coefficients, { %Array* }* %control, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %4 = trunc i64 %3 to i32 + %5 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %4) + %6 = fptosi double %5 to i64 + %__qsVar0__coefficientsPadded__ = call %Array* @Microsoft__Quantum__Arrays___363cc02fdfc747c5803543b2d61fb3a1_Padded__body(i64 %6, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1) + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsPadded__) + %8 = icmp eq i64 %7, 1 + br i1 %8, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsPadded__, i64 0) + %10 = bitcast i8* 
%9 to double* + %11 = load double, double* %10, align 8 + %12 = call double @Microsoft__Quantum__Math__AbsD__body(double %11) + %13 = fcmp ogt double %12, %tolerance + br i1 %13, label %then0__2, label %continue__2 + +then0__2: ; preds = %then0__1 + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %15 = bitcast i8* %14 to i2* + store i2 -2, i2* %15, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsPadded__, i64 0) + %17 = bitcast i8* %16 to double* + %theta = load double, double* %17, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %19 = bitcast i8* %18 to %Qubit** + store %Qubit* %target, %Qubit** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + br label %continue__2 + +continue__2: ; preds = %then0__2, %then0__1 + br label %continue__1 + +else__1: ; preds = %entry + %20 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef3__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__) + %21 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 0 + %__qsVar1__coefficients0__ = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1) + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 1 + %__qsVar2__coefficients1__ = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1) + %23 = call i1 @Microsoft__Quantum__Canon____QsRef3__AnyOutsideToleranceD____body(double %tolerance, %Array* %__qsVar2__coefficients1__) + br i1 %23, label %then0__3, label %continue__3 + +then0__3: ; preds = %else__1 + %24 = call %Qubit* @Microsoft__Quantum__Arrays___9d7834b9a7ce4d8cb1a6bd58bdfd2736_Tail__body(%Array* %1) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %24, %Qubit* %target) + %25 = call %Array* @Microsoft__Quantum__Arrays___11e60deb067d435786055b3275e916c8_Most__body(%Array* %1) + %26 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %25) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar2__coefficients1__, { %Array* }* %26, %Qubit* %target) + %27 = call %Qubit* @Microsoft__Quantum__Arrays___9d7834b9a7ce4d8cb1a6bd58bdfd2736_Tail__body(%Array* %1) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %27, %Qubit* %target) + %28 = getelementptr inbounds { %Array* }, { %Array* }* %26, i32 0, i32 0 + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1) + %30 = bitcast { %Array* }* %26 to %Tuple* + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %else__1 + %31 = call %Array* @Microsoft__Quantum__Arrays___11e60deb067d435786055b3275e916c8_Most__body(%Array* %1) + %32 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %31) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar1__coefficients0__, { %Array* }* %32, %Qubit* %target) + %33 = getelementptr inbounds { %Array* }, { %Array* }* %32, i32 0, i32 0 + %34 = load %Array*, %Array** %33, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1) + %35 = bitcast { %Array*, %Array* }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + %36 = bitcast { %Array* }* %32 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %continue__1 + +continue__1: ; preds = %continue__3, %continue__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %qubits__1 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + %5 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call i1 @Microsoft__Quantum__Arrays___4b7f49d75c874a3d9275080e156c5c7c_IsEmpty__body(%Array* %qubits__1) + br i1 %6, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %7 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @16, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__fail(%String* %7) + unreachable + +continue__1: ; preds = %entry + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1) + %9 = trunc i64 %8 to i32 + %10 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %9) + %11 = fptosi double %10 to i64 + %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___363cc02fdfc747c5803543b2d61fb3a1_Padded__body(i64 %11, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1) + %12 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef3__MultiplexZCoefficients____body(%Array* %coefficientsPadded) + %13 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 0 + %coefficients0 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1) + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 1 + %coefficients1 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 1) + %15 = call %Array* @Microsoft__Quantum__Arrays___11e60deb067d435786055b3275e916c8_Most__body(%Array* %qubits__1) + %16 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %15) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + %17 = call %Qubit* @Microsoft__Quantum__Arrays___9d7834b9a7ce4d8cb1a6bd58bdfd2736_Tail__body(%Array* %qubits__1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, { %Array* }*, %Qubit* }* + %20 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 3 + store double %tolerance, double* %20, align 8 + store %Array* %coefficients1, %Array** %21, align 8 + store { %Array* }* %16, { %Array* }** %22, align 8 + store %Qubit* %17, %Qubit** %23, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }*, %Qubit* }* %19) + %24 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded) + %25 = icmp eq i64 %24, 2 + br i1 %25, label %then0__2, label %else__1 + +then0__2: ; preds = %continue__1 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0) + %27 = bitcast i8* %26 to double* + %28 = load double, double* %27, align 8 + %29 = call double @Microsoft__Quantum__Math__AbsD__body(double %28) + %30 = fcmp ogt double %29, %tolerance + br i1 %30, label %then0__3, 
label %continue__3 + +then0__3: ; preds = %then0__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %32 = bitcast i8* %31 to i2* + store i2 0, i2* %32, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0) + %34 = bitcast i8* %33 to double* + %35 = load double, double* %34, align 8 + %theta = fmul double 1.000000e+00, %35 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { %Array*, double, %Array* }* + %38 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %37, i32 0, i32 1 + %40 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %37, i32 0, i32 2 + store %Array* %paulis, %Array** %38, align 8 + store double %theta, double* %39, align 8 + store %Array* %qubits__1, %Array** %40, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %37) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %then0__2 + br label %continue__2 + +else__1: ; preds = %continue__1 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 1) + %41 = call %Array* @Microsoft__Quantum__Arrays___11e60deb067d435786055b3275e916c8_Most__body(%Array* %qubits__1) + %42 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %41) + call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1) + %43 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %44 = bitcast %Tuple* %43 to { double, %Array*, { %Array* }* }* + %45 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %44, i32 0, i32 0 + %46 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %44, i32 0, i32 1 + %47 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %44, i32 0, i32 2 + store double %tolerance, double* %45, align 8 + store %Array* %coefficients0, %Array** %46, align 8 + store { %Array* }* %42, { %Array* }** 
%47, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %44) + %48 = getelementptr inbounds { %Array* }, { %Array* }* %42, i32 0, i32 0 + %49 = load %Array*, %Array** %48, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + %50 = bitcast { %Array* }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %50, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %continue__3 + %51 = getelementptr inbounds { %Array* }, { %Array* }* %16, i32 0, i32 0 + %52 = load %Array*, %Array** %51, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1) + %53 = bitcast { %Array*, %Array* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %52, i32 -1) + %54 = bitcast { %Array* }* %16 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %54, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl(%Array* %controlRegister, { double, %Array*, { %Array* }*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %control = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 
+ %target = load %Qubit*, %Qubit** %7, align 8 + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5) + %9 = add i64 %8, 1 + %10 = trunc i64 %9 to i32 + %11 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %10) + %12 = fptosi double %11 to i64 + %13 = trunc i64 %8 to i32 + %14 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %13) + %15 = fptosi double %14 to i64 + %16 = call %Array* @Microsoft__Quantum__Arrays___363cc02fdfc747c5803543b2d61fb3a1_Padded__body(i64 %15, double 0.000000e+00, %Array* %coefficients) + %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___363cc02fdfc747c5803543b2d61fb3a1_Padded__body(i64 %12, double 0.000000e+00, %Array* %16) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1) + %17 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef3__MultiplexZCoefficients____body(%Array* %coefficientsPadded) + %18 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 0 + %coefficients0 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1) + %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 1 + %coefficients1 = load %Array*, %Array** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients0, { %Array* }* %control, %Qubit* %target) + %20 = call i1 @Microsoft__Quantum__Canon____QsRef3__AnyOutsideToleranceD____body(double %tolerance, %Array* %coefficients1) + br i1 %20, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients1, { %Array* }* %control, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1) + %21 = bitcast { %Array*, %Array* }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + ret void +} + +define 
internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %qubits__1 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + %5 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call i1 @Microsoft__Quantum__Arrays___4b7f49d75c874a3d9275080e156c5c7c_IsEmpty__body(%Array* %qubits__1) + br i1 %6, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %7 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @16, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__fail(%String* %7) + unreachable + +continue__1: ; preds = %entry + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1) + %9 = trunc i64 %8 to i32 + %10 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %9) + %11 = fptosi double %10 to i64 + %__qsVar0__coefficientsPadded__ = call %Array* @Microsoft__Quantum__Arrays___363cc02fdfc747c5803543b2d61fb3a1_Padded__body(i64 %11, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1) + %12 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef3__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__) + %13 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 0 + %__qsVar1__coefficients0__ = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1) + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 1 + %__qsVar2__coefficients1__ = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1) + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsPadded__) + %16 = icmp eq i64 %15, 2 + br i1 %16, label %then0__2, label %else__1 + +then0__2: ; preds = %continue__1 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0) + %18 = bitcast i8* %17 to double* + %19 = load double, double* %18, align 8 + %20 = call double @Microsoft__Quantum__Math__AbsD__body(double %19) + %21 = fcmp ogt double %20, %tolerance + br i1 %21, label %then0__3, 
label %continue__3 + +then0__3: ; preds = %then0__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %23 = bitcast i8* %22 to i2* + store i2 0, i2* %23, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0) + %25 = bitcast i8* %24 to double* + %26 = load double, double* %25, align 8 + %theta = fmul double 1.000000e+00, %26 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, double, %Array* }* + %29 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %28, i32 0, i32 1 + %31 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %28, i32 0, i32 2 + store %Array* %paulis, %Array** %29, align 8 + store double %theta, double* %30, align 8 + store %Array* %qubits__1, %Array** %31, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %28) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %then0__2 + br label %continue__2 + +else__1: ; preds = %continue__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 1) + %32 = call %Array* @Microsoft__Quantum__Arrays___11e60deb067d435786055b3275e916c8_Most__body(%Array* %qubits__1) + %33 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %32) + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + %34 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %35 = bitcast %Tuple* %34 to { double, %Array*, { %Array* }* }* + %36 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %35, i32 0, i32 0 + %37 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %35, i32 0, i32 1 + %38 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %35, i32 0, i32 2 + store double %tolerance, double* %36, align 8 + store %Array* %__qsVar1__coefficients0__, %Array** %37, align 8 + 
store { %Array* }* %33, { %Array* }** %38, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %35) + %39 = getelementptr inbounds { %Array* }, { %Array* }* %33, i32 0, i32 0 + %40 = load %Array*, %Array** %39, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %40, i32 -1) + %41 = bitcast { %Array* }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %continue__3 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 1) + %42 = call %Array* @Microsoft__Quantum__Arrays___11e60deb067d435786055b3275e916c8_Most__body(%Array* %qubits__1) + %43 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %42) + call void @__quantum__rt__array_update_reference_count(%Array* %42, i32 -1) + %44 = call %Qubit* @Microsoft__Quantum__Arrays___9d7834b9a7ce4d8cb1a6bd58bdfd2736_Tail__body(%Array* %qubits__1) + %45 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %46 = bitcast %Tuple* %45 to { double, %Array*, { %Array* }*, %Qubit* }* + %47 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 0 + %48 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 1 + %49 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 2 + %50 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 3 + store double %tolerance, double* %47, align 8 + store %Array* %__qsVar2__coefficients1__, %Array** %48, align 8 + store { %Array* }* %43, { %Array* }** %49, align 8 + store %Qubit* %44, %Qubit** %50, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }*, %Qubit* }* %46) + %51 = getelementptr inbounds { %Array* }, { %Array* }* %43, i32 0, i32 0 + %52 = load %Array*, %Array** %51, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1) + %53 = bitcast { 
%Array*, %Array* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %52, i32 -1) + %54 = bitcast { %Array* }* %43 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %54, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj(%Array* %controlRegister, { double, %Array*, { %Array* }*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %control = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %target = load %Qubit*, %Qubit** %7, align 8 + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5) + %9 = add i64 %8, 1 + %10 = trunc i64 %9 to i32 + %11 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %10) + %12 = fptosi double %11 to i64 + %13 = trunc i64 %8 to i32 + %14 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %13) + %15 = fptosi double %14 to i64 + %16 = call %Array* @Microsoft__Quantum__Arrays___363cc02fdfc747c5803543b2d61fb3a1_Padded__body(i64 %15, double 0.000000e+00, %Array* %coefficients) + %__qsVar0__coefficientsPadded__ = call %Array* @Microsoft__Quantum__Arrays___363cc02fdfc747c5803543b2d61fb3a1_Padded__body(i64 %12, double 0.000000e+00, %Array* %16) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1) + %17 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef3__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__) + %18 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 0 + %__qsVar1__coefficients0__ = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1) + %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 1 + %__qsVar2__coefficients1__ = load %Array*, %Array** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1) + %20 = call i1 @Microsoft__Quantum__Canon____QsRef3__AnyOutsideToleranceD____body(double %tolerance, %Array* %__qsVar2__coefficients1__) + br i1 %20, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void 
@__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar2__coefficients1__, { %Array* }* %control, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar1__coefficients0__, { %Array* }* %control, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1) + %21 = bitcast { %Array*, %Array* }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body(double %tolerance, %Array* %coefficients, i2 %pauli, { %Array* }* %control, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = icmp eq i2 %pauli, -2 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, double, %Array*, { %Array* }* }* + %7 = getelementptr inbounds { %Callable*, double, %Array*, { 
%Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 2 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 3 + store %Callable* %4, %Callable** %7, align 8 + store double %tolerance, double* %8, align 8 + store %Array* %coefficients, %Array** %9, align 8 + store { %Array* }* %control, { %Array* }** %10, align 8 + %op = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__14__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__10__FunctionTable, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Qubit* }* + %13 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %12, i32 0, i32 0 + store %Qubit* %target, %Qubit** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %11, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %14 = icmp eq i2 %pauli, 1 + br i1 %14, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %18 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 3 + %22 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { 
%Array* }* }* %17, i32 0, i32 4 + store %Callable* %15, %Callable** %18, align 8 + store double %tolerance, double* %19, align 8 + store %Array* %coefficients, %Array** %20, align 8 + store i2 -2, i2* %21, align 1 + store { %Array* }* %control, { %Array* }** %22, align 8 + %op__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__15__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__11__FunctionTable, %Tuple* %16) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 1) + %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @Microsoft__Quantum__Canon___3c45b07de5f646989fd7fbe3113e5a3d_ApplyWithCA__body(%Callable* %23, %Callable* %op__1, %Qubit* %target) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %24 = icmp eq i2 %pauli, -1 + br i1 %24, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %25 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %28 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 1 + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 2 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 3 + %32 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 4 + store %Callable* %25, %Callable** %28, align 8 + store double %tolerance, double* %29, align 8 + store %Array* %coefficients, %Array** %30, align 8 + store i2 1, i2* %31, align 1 + store { %Array* }* %control, { %Array* }** %32, align 8 + %op__2 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* 
@PartialApplication__16__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__11__FunctionTable, %Tuple* %26) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 1) + %33 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %33) + call void @Microsoft__Quantum__Canon___3c45b07de5f646989fd7fbe3113e5a3d_ApplyWithCA__body(%Callable* %33, %Callable* %op__2, %Qubit* %target) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %34 = icmp eq i2 %pauli, 0 + br i1 %34, label %then3__1, label %else__1 + +then3__1: ; preds = %test3__1 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__body(double %tolerance, %Array* %coefficients, { %Array* }* %control) + br label %continue__1 + +else__1: ; preds = %test3__1 + %35 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @17, i32 0, i32 0)) + %36 = icmp eq i2 1, %pauli + br i1 %36, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @18, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %38 = icmp eq i2 -1, %pauli + br i1 %38, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %39 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @19, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %40 = icmp eq i2 -2, %pauli + br i1 %40, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @20, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %42 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @21, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %43 = phi %String* [ %41, %condTrue__3 ], [ %42, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %44 = phi %String* [ %39, %condTrue__2 ], [ %43, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %45 = phi %String* [ %37, %condTrue__1 ], [ %44, %condContinue__2 ] + %46 = call %String* @__quantum__rt__string_concatenate(%String* %35, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + %47 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @9, i32 0, i32 
0)) + %48 = call %String* @__quantum__rt__string_concatenate(%String* %46, %String* %47) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__fail(%String* %48) + unreachable + +continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__14__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__14__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, 
double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__14__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, 
align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__14__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { 
%Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, 
{ %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %5 = load double, double* %1, align 8 + %6 = load %Array*, %Array** %2, align 8 + %7 = load { %Array* }*, { %Array* }** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %5, %Array* %6, { %Array* }* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %5 = load double, double* %1, align 8 + %6 = load %Array*, %Array** %2, align 8 + %7 = load { %Array* }*, { %Array* }** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %5, %Array* %6, { %Array* }* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, { %Array* }*, %Qubit* }*, { double, %Array*, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl(%Array* %3, { double, %Array*, { %Array* }*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, { %Array* }*, %Qubit* }*, { double, %Array*, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj(%Array* %3, { double, %Array*, { %Array* }*, %Qubit* }* %4) + ret void +} + 
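+; The two MemoryManagement__10 callbacks below are the reference- and
+; alias-count handlers registered alongside @PartialApplication__14__FunctionTable:
+; each walks the capture tuple { %Callable*, double, %Array*, { %Array* }* },
+; applying %count-change to the captured callable, the coefficient %Array,
+; the inner { %Array* } control tuple and its array, and finally the capture
+; tuple itself, so the whole capture is retained and released as one unit.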
+define internal void @MemoryManagement__10__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__10__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__15__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** 
%3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__15__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { 
double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__15__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { 
double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__15__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 
0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load %Array*, %Array** %2, align 8 + %8 = load i2, i2* %3, align 1 + %9 = load { %Array* }*, { %Array* }** %4, align 8 + %10 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body(double %6, %Array* %7, i2 %8, { %Array* }* %9, %Qubit* %10) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load %Array*, %Array** %2, align 8 + %8 = load i2, i2* %3, align 1 + %9 = load { %Array* }*, { %Array* }** %4, align 8 + %10 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj(double %6, %Array* %7, i2 %8, { %Array* }* %9, %Qubit* %10) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, i2, { %Array* }*, %Qubit* }*, { double, %Array*, i2, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl(%Array* %3, { double, %Array*, i2, { %Array* }*, %Qubit* }* %4) + ret 
void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, i2, { %Array* }*, %Qubit* }*, { double, %Array*, i2, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj(%Array* %3, { double, %Array*, i2, { %Array* }*, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__11__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__11__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { 
%Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___3c45b07de5f646989fd7fbe3113e5a3d_ApplyWithCA__body(%Callable* %outerOperation, %Callable* %innerOperation, %Qubit* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Qubit* }* + %2 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %1, i32 0, i32 0 + store %Qubit* %target, %Qubit** %2, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %outerOperation, %Tuple* %0, %Tuple* null) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Qubit* }* + %5 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %4, i32 0, i32 0 + store %Qubit* %target, %Qubit** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %innerOperation, %Tuple* %3, %Tuple* null) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Qubit* }* + %9 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %8, i32 0, i32 0 + store %Qubit* %target, %Qubit** %9, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %2) + ret void +} + +define internal void 
@Microsoft__Quantum__Intrinsic__H__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Lifted__PartialApplication__16__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* 
%13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__16__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + 
call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__16__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 
0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__16__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { 
%Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* 
%result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj(double %tolerance, %Array* %coefficients, i2 %pauli, { %Array* }* %control, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = icmp eq i2 %pauli, -2 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, double, %Array*, { %Array* }* }* + %7 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 2 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 3 + store %Callable* %4, %Callable** %7, align 8 + store double %tolerance, double* %8, align 8 + store %Array* %coefficients, %Array** %9, align 8 + store { %Array* }* %control, { %Array* }** %10, align 8 + %__qsVar0__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__17__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__10__FunctionTable, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %11) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Qubit* }* + %14 = getelementptr inbounds { %Qubit* }, { %Qubit* 
}* %13, i32 0, i32 0 + store %Qubit* %target, %Qubit** %14, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %15 = icmp eq i2 %pauli, 1 + br i1 %15, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %16 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 1 + %21 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 2 + %22 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 3 + %23 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 4 + store %Callable* %16, %Callable** %19, align 8 + store double %tolerance, double* %20, align 8 + store %Array* %coefficients, %Array** %21, align 8 + store i2 -2, i2* %22, align 1 + store { %Array* }* %control, { %Array* }** %23, align 8 + %__qsVar1__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__18__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__11__FunctionTable, %Tuple* %17) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + %24 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @Microsoft__Quantum__Canon___3c45b07de5f646989fd7fbe3113e5a3d_ApplyWithCA__adj(%Callable* %24, %Callable* %__qsVar1__op__, %Qubit* %target) + call void @__quantum__rt__capture_update_alias_count(%Callable* 
%__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %25 = icmp eq i2 %pauli, -1 + br i1 %25, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %26 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 1 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 2 + %32 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 3 + %33 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 4 + store %Callable* %26, %Callable** %29, align 8 + store double %tolerance, double* %30, align 8 + store %Array* %coefficients, %Array** %31, align 8 + store i2 1, i2* %32, align 1 + store { %Array* }* %control, { %Array* }** %33, align 8 + %__qsVar2__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__19__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__11__FunctionTable, %Tuple* %27) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + %34 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %34) + call void @Microsoft__Quantum__Canon___3c45b07de5f646989fd7fbe3113e5a3d_ApplyWithCA__adj(%Callable* %34, %Callable* %__qsVar2__op__, %Qubit* %target) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %34, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %35 = icmp eq i2 %pauli, 0 + br i1 %35, label %then3__1, label %else__1 + +then3__1: ; preds = %test3__1 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__adj(double %tolerance, %Array* %coefficients, { %Array* }* %control) + br label %continue__1 + +else__1: ; preds = %test3__1 + %36 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @17, i32 0, i32 0)) + %37 = icmp eq i2 1, %pauli + br i1 %37, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %38 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @18, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %39 = icmp eq i2 -1, %pauli + br i1 %39, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %40 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @19, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %41 = icmp eq i2 -2, %pauli + br i1 %41, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %42 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @20, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %43 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @21, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %44 = phi %String* [ %42, %condTrue__3 ], [ %43, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %45 = phi %String* [ %40, %condTrue__2 ], [ %44, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %46 = phi %String* [ %38, %condTrue__1 ], [ %45, %condContinue__2 ] + %47 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %46) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + %48 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @9, i32 0, i32 0)) + %49 = call %String* @__quantum__rt__string_concatenate(%String* %47, %String* %48) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %48, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__fail(%String* %49) + unreachable + +continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %pauli = load i2, i2* %3, align 1 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %control = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %target = load %Qubit*, %Qubit** %8, align 8 + %9 = icmp eq i2 %pauli, -2 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Callable*, double, %Array*, { %Array* }* }* + %13 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 2 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 3 + store %Callable* %10, %Callable** %13, align 8 + store double %tolerance, double* %14, align 8 + store %Array* %coefficients, %Array** %15, align 8 + store { %Array* }* %control, { %Array* }** %16, align 8 + %op = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__20__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__10__FunctionTable, %Tuple* %11) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %17 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %17) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, %Qubit* }* + %20 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %20, align 8 + store %Qubit* %target, %Qubit** %21, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %18, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %22 = icmp eq i2 %pauli, 1 + br i1 %22, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %26 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 1 + %28 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 2 + %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 3 + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 4 + store %Callable* %23, %Callable** %26, align 8 + store double %tolerance, double* %27, align 8 + store %Array* %coefficients, %Array** %28, align 8 + store i2 -2, i2* %29, align 1 + store { 
%Array* }* %control, { %Array* }** %30, align 8 + %op__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__21__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__11__FunctionTable, %Tuple* %24) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 1) + %31 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { %Callable*, %Callable*, %Qubit* }* + %34 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 2 + store %Callable* %31, %Callable** %34, align 8 + store %Callable* %op__1, %Callable** %35, align 8 + store %Qubit* %target, %Qubit** %36, align 8 + call void @Microsoft__Quantum__Canon___3c45b07de5f646989fd7fbe3113e5a3d_ApplyWithCA__ctl(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %33) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %37 = icmp eq i2 %pauli, -1 + br i1 %37, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %38 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %41 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { 
%Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 0 + %42 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 1 + %43 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 2 + %44 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 3 + %45 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 4 + store %Callable* %38, %Callable** %41, align 8 + store double %tolerance, double* %42, align 8 + store %Array* %coefficients, %Array** %43, align 8 + store i2 1, i2* %44, align 1 + store { %Array* }* %control, { %Array* }** %45, align 8 + %op__2 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__22__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__11__FunctionTable, %Tuple* %39) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 1) + %46 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %46) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { %Callable*, %Callable*, %Qubit* }* + %49 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 1 + %51 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 2 + store %Callable* %46, %Callable** %49, align 8 + store %Callable* %op__2, %Callable** %50, align 8 + store %Qubit* %target, %Qubit** %51, align 8 + call void @Microsoft__Quantum__Canon___3c45b07de5f646989fd7fbe3113e5a3d_ApplyWithCA__ctl(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %48) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %52 = icmp eq i2 %pauli, 0 + br i1 %52, label %then3__1, 
label %else__1 + +then3__1: ; preds = %test3__1 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %53 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %54 = bitcast %Tuple* %53 to { double, %Array*, { %Array* }* }* + %55 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 0 + %56 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 1 + %57 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 2 + store double %tolerance, double* %55, align 8 + store %Array* %coefficients, %Array** %56, align 8 + store { %Array* }* %control, { %Array* }** %57, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %54) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1) + br label %continue__1 + +else__1: ; preds = %test3__1 + %58 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @17, i32 0, i32 0)) + %59 = icmp eq i2 1, %pauli + br i1 %59, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %60 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @18, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %61 = icmp eq i2 -1, %pauli + br i1 %61, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %62 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @19, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %63 = icmp eq i2 -2, %pauli + br i1 %63, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %64 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @20, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %65 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @21, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %66 = phi %String* [ %64, %condTrue__3 ], [ %65, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %67 = phi %String* [ %62, %condTrue__2 ], [ %66, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %68 = phi %String* [ %60, %condTrue__1 ], [ %67, %condContinue__2 ] + %69 = call %String* @__quantum__rt__string_concatenate(%String* %58, %String* %68) + call void @__quantum__rt__string_update_reference_count(%String* %58, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %68, i32 -1) + %70 = call %String* @__quantum__rt__string_create(i8* getelementptr 
inbounds ([2 x i8], [2 x i8]* @9, i32 0, i32 0)) + %71 = call %String* @__quantum__rt__string_concatenate(%String* %69, %String* %70) + call void @__quantum__rt__string_update_reference_count(%String* %69, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %70, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__fail(%String* %71) + unreachable + +continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %pauli = load i2, i2* %3, align 1 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %control = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %target = load %Qubit*, %Qubit** %8, align 8 + %9 = icmp eq i2 %pauli, -2 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Callable*, double, %Array*, 
{ %Array* }* }* + %13 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 2 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 3 + store %Callable* %10, %Callable** %13, align 8 + store double %tolerance, double* %14, align 8 + store %Array* %coefficients, %Array** %15, align 8 + store { %Array* }* %control, { %Array* }** %16, align 8 + %__qsVar0__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__23__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__10__FunctionTable, %Tuple* %11) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %17 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %17) + call void @__quantum__rt__callable_make_controlled(%Callable* %17) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, %Qubit* }* + %20 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %20, align 8 + store %Qubit* %target, %Qubit** %21, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %18, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %22 = icmp eq i2 %pauli, 1 + br i1 %22, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %24 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %26 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 1 + %28 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 2 + %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 3 + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 4 + store %Callable* %23, %Callable** %26, align 8 + store double %tolerance, double* %27, align 8 + store %Array* %coefficients, %Array** %28, align 8 + store i2 -2, i2* %29, align 1 + store { %Array* }* %control, { %Array* }** %30, align 8 + %__qsVar1__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__24__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__11__FunctionTable, %Tuple* %24) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + %31 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { %Callable*, %Callable*, %Qubit* }* + %34 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 2 + store %Callable* %31, %Callable** %34, align 8 + store %Callable* %__qsVar1__op__, %Callable** %35, align 8 + store %Qubit* %target, %Qubit** %36, align 8 + call void @Microsoft__Quantum__Canon___3c45b07de5f646989fd7fbe3113e5a3d_ApplyWithCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %33) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %37 = icmp eq i2 %pauli, -1 + br i1 %37, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %38 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %41 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 0 + %42 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 1 + %43 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 2 + %44 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 3 + %45 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 4 + store %Callable* %38, %Callable** %41, align 8 + store double %tolerance, double* %42, align 8 + store %Array* %coefficients, %Array** %43, align 8 + store i2 1, i2* %44, align 1 + store { %Array* }* %control, { %Array* }** %45, align 8 + %__qsVar2__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__25__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__11__FunctionTable, %Tuple* %39) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + %46 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %46) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { %Callable*, %Callable*, %Qubit* }* + %49 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 0 
+ %50 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 1 + %51 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 2 + store %Callable* %46, %Callable** %49, align 8 + store %Callable* %__qsVar2__op__, %Callable** %50, align 8 + store %Qubit* %target, %Qubit** %51, align 8 + call void @Microsoft__Quantum__Canon___3c45b07de5f646989fd7fbe3113e5a3d_ApplyWithCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %48) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %52 = icmp eq i2 %pauli, 0 + br i1 %52, label %then3__1, label %else__1 + +then3__1: ; preds = %test3__1 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %53 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %54 = bitcast %Tuple* %53 to { double, %Array*, { %Array* }* }* + %55 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 0 + %56 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 1 + %57 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 2 + store double %tolerance, double* %55, align 8 + store %Array* %coefficients, %Array** %56, align 8 + store { %Array* }* %control, { %Array* }** %57, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %54) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1) + br label %continue__1 + +else__1: ; preds = %test3__1 + %58 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @17, i32 0, i32 0)) + %59 = icmp eq i2 1, %pauli + br i1 %59, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %60 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @18, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %61 = icmp eq i2 -1, %pauli + br i1 
%61, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %62 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @19, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %63 = icmp eq i2 -2, %pauli + br i1 %63, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %64 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @20, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %65 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @21, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %66 = phi %String* [ %64, %condTrue__3 ], [ %65, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %67 = phi %String* [ %62, %condTrue__2 ], [ %66, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %68 = phi %String* [ %60, %condTrue__1 ], [ %67, %condContinue__2 ] + %69 = call %String* @__quantum__rt__string_concatenate(%String* %58, %String* %68) + call void @__quantum__rt__string_update_reference_count(%String* %58, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %68, i32 -1) + %70 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @9, i32 0, i32 0)) + %71 = call %String* @__quantum__rt__string_concatenate(%String* %69, %String* %70) + call void @__quantum__rt__string_update_reference_count(%String* %69, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %70, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__fail(%String* %71) + unreachable + +continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__17__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__17__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, 
%Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__17__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, 
{ double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__17__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { 
%Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__18__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store 
%Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__18__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__18__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 
0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__18__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* 
}* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___3c45b07de5f646989fd7fbe3113e5a3d_ApplyWithCA__adj(%Callable* %outerOperation, %Callable* %innerOperation, %Qubit* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %0 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %0) + call void @__quantum__rt__callable_make_adjoint(%Callable* %0) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Qubit* }* + %3 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %2, i32 0, i32 0 + store %Qubit* %target, %Qubit** %3, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %0, %Tuple* %1, %Tuple* null) + %4 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Qubit* }* + 
%7 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %6, i32 0, i32 0 + store %Qubit* %target, %Qubit** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %5, %Tuple* null) + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %8) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Qubit* }* + %11 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %10, i32 0, i32 0 + store %Qubit* %target, %Qubit** %11, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %9, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__19__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* 
%13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__19__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { 
%Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__19__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, 
%Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__19__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { 
%Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__20__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { 
%Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__20__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* 
@__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__20__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, 
align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__20__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 
= getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__21__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, 
i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__21__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__21__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, 
i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__21__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 
+ %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___3c45b07de5f646989fd7fbe3113e5a3d_ApplyWithCA__ctl(%Array* %controlRegister, { %Callable*, %Callable*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %outerOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %innerOperation = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Qubit* }* + %6 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %5, i32 0, i32 0 + store %Qubit* %target, %Qubit** %6, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %outerOperation, %Tuple* %4, %Tuple* null) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + 
call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Qubit* }* + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Qubit* %target, %Qubit** %11, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %8, %Tuple* null) + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Qubit* }* + %15 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %14, i32 0, i32 0 + store %Qubit* %target, %Qubit** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %13, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__22__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + 
%10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__22__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = 
getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__22__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, 
%Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__22__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 
= call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__23__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { 
%Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__23__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, 
i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__23__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* 
%18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__23__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** 
%17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__24__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* 
%13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__24__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* 
}* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__24__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, 
%Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__24__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, 
{ %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___3c45b07de5f646989fd7fbe3113e5a3d_ApplyWithCA__ctladj(%Array* %controlRegister, { %Callable*, %Callable*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %outerOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %innerOperation = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Callable* 
@__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Qubit* }* + %7 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %6, i32 0, i32 0 + store %Qubit* %target, %Qubit** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %5, %Tuple* null) + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %8) + call void @__quantum__rt__callable_make_adjoint(%Callable* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Array*, %Qubit* }* + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %10, i32 0, i32 1 + store %Array* %controlRegister, %Array** %11, align 8 + store %Qubit* %target, %Qubit** %12, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %9, %Tuple* null) + %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %13) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Qubit* }* + %16 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %15, i32 0, i32 0 + store %Qubit* %target, %Qubit** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %13, %Tuple* %14, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 
-1) + ret void +} + +define internal void @Lifted__PartialApplication__25__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__25__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, 
double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__25__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, 
i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__25__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { 
%Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__CZ__adj(%Qubit* %control, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Canon__CZ__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Canon__CZ__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %control, %Qubit** %5, align 8 + %__controlQubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %__controlQubits__, %Array* %3) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__1, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__CZ__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* + %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 + store %Qubit* %control, %Qubit** %5, align 8 + store %Qubit* %target, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Canon__CZ__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, 
i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__HY__adj(%Qubit* %target) { +entry: + call void @__quantum__qis__s__adj(%Qubit* %target) + call void @__quantum__qis__h__body(%Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Canon__HY__ctl(%Array* %__controlQubits__, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__h__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__s__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__HY__ctladj(%Array* %__controlQubits__, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__s__ctladj(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__h__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal i1 @Microsoft__Quantum__Canon__IsRangeEmpty__body(%Range %rng) { +entry: + %0 = extractvalue %Range %rng, 0 + %1 = extractvalue %Range %rng, 1 + %2 = extractvalue %Range %rng, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %3 = icmp sgt i64 %1, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idx = phi i64 [ %0, %preheader__1 ], [ %7, %exiting__1 ] + %4 = icmp sle i64 %idx, %2 + %5 = icmp sge i64 %idx, %2 + %6 = select i1 %3, i1 %4, i1 %5 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + ret i1 false + +exiting__1: ; No predecessors! 
+ %7 = add i64 %idx, %1 + br label %header__1 + +exit__1: ; preds = %header__1 + ret i1 true +} + +define internal void @Lifted__PartialApplication__26__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { { %Array* }*, %Array* }* + %4 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 0 + %5 = load { %Array* }*, { %Array* }** %4, align 8 + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %10 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 2 + store { i64, %Callable* }* %2, { i64, %Callable* }** %10, align 8 + store { %Array* }* %5, { %Array* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__26__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { { %Array* }*, %Array* }* + %4 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 0 + %5 = load { %Array* }*, { %Array* }** %4, align 8 + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %10 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 1 + 
%12 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 2 + store { i64, %Callable* }* %2, { i64, %Callable* }** %10, align 8 + store { %Array* }* %5, { %Array* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__26__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { %Array* }*, %Array* }*, { { %Array* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %6 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 1 + %7 = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8 + %8 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %9 = load { %Array* }*, { %Array* }** %8, align 8 + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %14 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 2 + store { i64, %Callable* }* %7, { i64, %Callable* }** %14, align 8 + store { %Array* }* %9, { %Array* }** %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + 
%19 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, { { i64, %Callable* }*, { %Array* }*, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__26__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { %Array* }*, %Array* }*, { { %Array* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %6 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 1 + %7 = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8 + %8 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %9 = load { %Array* }*, { %Array* }** %8, align 8 + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %14 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 2 + store { i64, %Callable* }* %7, { i64, %Callable* }** %14, align 8 + store { %Array* }* %9, { %Array* }** %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, 
%Callable* }*, { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, { { i64, %Callable* }*, { %Array* }*, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %23) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___dae13232a52742a7a1f5472999176521_MultiplexOperationsBruteForceFromGenerator__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load { %Array* }*, { %Array* }** %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___dae13232a52742a7a1f5472999176521_MultiplexOperationsBruteForceFromGenerator__body({ i64, %Callable* }* %4, { %Array* }* %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___dae13232a52742a7a1f5472999176521_MultiplexOperationsBruteForceFromGenerator__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 
2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load { %Array* }*, { %Array* }** %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___dae13232a52742a7a1f5472999176521_MultiplexOperationsBruteForceFromGenerator__adj({ i64, %Callable* }* %4, { %Array* }* %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___dae13232a52742a7a1f5472999176521_MultiplexOperationsBruteForceFromGenerator__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, { %Array* }*, %Array* }*, { { i64, %Callable* }*, { %Array* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___dae13232a52742a7a1f5472999176521_MultiplexOperationsBruteForceFromGenerator__ctl(%Array* %3, { { i64, %Callable* }*, { %Array* }*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___dae13232a52742a7a1f5472999176521_MultiplexOperationsBruteForceFromGenerator__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, { %Array* }*, %Array* }*, { { i64, %Callable* }*, { %Array* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___dae13232a52742a7a1f5472999176521_MultiplexOperationsBruteForceFromGenerator__ctladj(%Array* %3, { { i64, %Callable* }*, { %Array* }*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__12__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %4, i32 0, i32 1 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + %7 = bitcast { i64, %Callable* }* %4 to %Tuple* + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__12__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %4, i32 0, i32 1 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + %7 = bitcast { i64, %Callable* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___dae13232a52742a7a1f5472999176521_MultiplexOperationsBruteForceFromGenerator__body({ i64, %Callable* }* %unitaryGenerator, { %Array* }* %index, %Array* %target) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %unitaryFunction = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %1 = bitcast { i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %4 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %nIndex = call i64 @__quantum__rt__array_get_size_1d(%Array* %3) + %5 = trunc i64 %nIndex to i32 + %6 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %5) + %nStates = fptosi double %6 to i64 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %nUnitaries = load i64, i64* %7, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %8 = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %nStates, i64 %nUnitaries) + %9 = sub i64 %8, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxOp = phi i64 [ 0, %entry ], [ %24, %exiting__1 ] + %10 = icmp sle i64 %idxOp, %9 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* 
null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i64 }* + %13 = getelementptr inbounds { i64 }, { i64 }* %12, i32 0, i32 0 + store i64 %idxOp, i64* %13, align 4 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %unitaryFunction, %Tuple* %11, %Tuple* %14) + %15 = bitcast %Tuple* %14 to { %Callable* }* + %16 = getelementptr inbounds { %Callable* }, { %Callable* }* %15, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @Microsoft__Quantum__Canon___91404c39c82d4109956a843f5ebe997e_ControlledOnInt__body(i64 %idxOp, %Callable* %17) + %19 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, %Array* }* + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 1 + store %Array* %19, %Array** %22, align 8 + store %Array* %target, %Array** %23, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %20, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %24 = add i64 %idxOp, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + %25 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___dae13232a52742a7a1f5472999176521_MultiplexOperationsBruteForceFromGenerator__adj({ i64, %Callable* }* %unitaryGenerator, { %Array* }* %index, %Array* %target) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %__qsVar3__unitaryFunction__ = load %Callable*, %Callable** %0, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + %1 = bitcast { i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %4 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %__qsVar0__nIndex__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %3) + %5 = trunc i64 %__qsVar0__nIndex__ to i32 + %6 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %5) + %__qsVar1__nStates__ = fptosi double %6 to i64 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %__qsVar2__nUnitaries__ = load i64, i64* %7, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + %8 = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %__qsVar1__nStates__, i64 %__qsVar2__nUnitaries__) + %9 = sub i64 %8, 1 + %10 = sub i64 %9, 0 + %11 = sdiv i64 %10, 1 + %12 = mul i64 1, %11 + %13 = add i64 0, %12 + %14 = insertvalue %Range zeroinitializer, i64 %13, 0 + %15 = insertvalue %Range %14, i64 -1, 1 + %16 = insertvalue %Range %15, i64 0, 2 + %17 = extractvalue %Range %16, 0 + %18 = extractvalue %Range %16, 1 + %19 = extractvalue %Range %16, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %20 = icmp sgt i64 %18, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar4__idxOp__ = phi i64 [ %17, %preheader__1 ], [ %38, %exiting__1 ] + %21 = icmp sle i64 %__qsVar4__idxOp__, %19 + %22 = icmp sge i64 %__qsVar4__idxOp__, %19 + %23 = select i1 %20, i1 %21, i1 %22 + br i1 %23, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { i64 }* + %26 = getelementptr inbounds { i64 }, { i64 }* %25, i32 0, i32 0 + store i64 %__qsVar4__idxOp__, i64* %26, align 4 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %__qsVar3__unitaryFunction__, %Tuple* %24, %Tuple* %27) + %28 = bitcast %Tuple* %27 to { %Callable* }* + %29 = getelementptr inbounds { %Callable* }, { %Callable* }* %28, i32 0, i32 0 + %30 = load %Callable*, %Callable** %29, align 8 + %31 = call %Callable* @Microsoft__Quantum__Canon___91404c39c82d4109956a843f5ebe997e_ControlledOnInt__body(i64 %__qsVar4__idxOp__, %Callable* %30) + %32 = call %Callable* @__quantum__rt__callable_copy(%Callable* %31, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %32) + %33 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %33, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %34 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %35 = bitcast %Tuple* %34 to { %Array*, %Array* }* + %36 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %35, i32 0, i32 0 + %37 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %35, i32 0, i32 1 + store %Array* %33, %Array** %36, align 8 + store %Array* %target, %Array** %37, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %32, %Tuple* %34, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %33, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %38 = add i64 %__qsVar4__idxOp__, %18 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + %39 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %39, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___dae13232a52742a7a1f5472999176521_MultiplexOperationsBruteForceFromGenerator__ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %unitaryGenerator = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %unitaryFunction = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %3 = bitcast { i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 1 
+ %index = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %nIndex = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %9 = trunc i64 %nIndex to i32 + %10 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %9) + %nStates = fptosi double %10 to i64 + %11 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %nUnitaries = load i64, i64* %11, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %12 = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %nStates, i64 %nUnitaries) + %13 = sub i64 %12, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxOp = phi i64 [ 0, %entry ], [ %33, %exiting__1 ] + %14 = icmp sle i64 %idxOp, %13 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { i64 }* + %17 = getelementptr inbounds { i64 }, { i64 }* %16, i32 0, i32 0 + store i64 %idxOp, i64* %17, align 4 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %unitaryFunction, %Tuple* %15, %Tuple* %18) + %19 = bitcast %Tuple* %18 to { %Callable* }* + %20 = getelementptr inbounds { %Callable* }, { %Callable* }* %19, i32 0, i32 0 + %21 = load %Callable*, %Callable** %20, align 8 + %22 = call %Callable* @Microsoft__Quantum__Canon___91404c39c82d4109956a843f5ebe997e_ControlledOnInt__body(i64 %idxOp, %Callable* %21) + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %24 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { %Array*, %Array* }* + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %26, i32 0, i32 0 + %28 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %26, i32 0, i32 1 + store %Array* %24, %Array** %27, align 8 + store %Array* %target, %Array** %28, align 8 + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* 
null, i32 1) to i64)) + %30 = bitcast %Tuple* %29 to { %Array*, { %Array*, %Array* }* }* + %31 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %30, i32 0, i32 0 + %32 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %30, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %31, align 8 + store { %Array*, %Array* }* %26, { %Array*, %Array* }** %32, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %29, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %33 = add i64 %idxOp, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + %34 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___dae13232a52742a7a1f5472999176521_MultiplexOperationsBruteForceFromGenerator__ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %unitaryGenerator = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %__qsVar3__unitaryFunction__ = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + %3 = bitcast { i64, %Callable* }* %unitaryGenerator to 
%Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %index = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %__qsVar0__nIndex__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %9 = trunc i64 %__qsVar0__nIndex__ to i32 + %10 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %9) + %__qsVar1__nStates__ = fptosi double %10 to i64 + %11 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %__qsVar2__nUnitaries__ = load i64, i64* %11, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + %12 = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %__qsVar1__nStates__, i64 %__qsVar2__nUnitaries__) + %13 = sub i64 %12, 1 + %14 = sub i64 %13, 0 + %15 = sdiv i64 %14, 1 + %16 = mul i64 1, %15 + %17 = add i64 0, %16 + %18 = insertvalue %Range zeroinitializer, i64 %17, 0 + %19 = insertvalue %Range %18, i64 -1, 1 + %20 = insertvalue %Range %19, i64 0, 2 + %21 = extractvalue %Range %20, 0 + %22 = extractvalue %Range %20, 1 + %23 = extractvalue %Range %20, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %24 = icmp sgt i64 %22, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar4__idxOp__ = phi i64 [ %21, %preheader__1 ], [ %46, %exiting__1 ] + %25 = icmp sle i64 %__qsVar4__idxOp__, %23 + %26 = icmp sge i64 %__qsVar4__idxOp__, %23 + %27 = select i1 %24, i1 %25, i1 %26 + br i1 %27, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { i64 }* + %30 = getelementptr inbounds { i64 }, { i64 }* %29, i32 0, i32 0 + store i64 %__qsVar4__idxOp__, i64* %30, align 4 + %31 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %__qsVar3__unitaryFunction__, %Tuple* %28, %Tuple* %31) + %32 = bitcast %Tuple* %31 to { %Callable* }* + %33 = getelementptr inbounds { %Callable* }, { %Callable* }* %32, i32 0, i32 0 + %34 = load %Callable*, %Callable** %33, align 8 + %35 = call %Callable* @Microsoft__Quantum__Canon___91404c39c82d4109956a843f5ebe997e_ControlledOnInt__body(i64 %__qsVar4__idxOp__, %Callable* %34) + %36 = call %Callable* @__quantum__rt__callable_copy(%Callable* %35, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %36) + call void @__quantum__rt__callable_make_controlled(%Callable* %36) + call 
void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %37 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %38 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %39 = bitcast %Tuple* %38 to { %Array*, %Array* }* + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 0 + %41 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 1 + store %Array* %37, %Array** %40, align 8 + store %Array* %target, %Array** %41, align 8 + %42 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %43 = bitcast %Tuple* %42 to { %Array*, { %Array*, %Array* }* }* + %44 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %43, i32 0, i32 0 + %45 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %43, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %44, align 8 + store { %Array*, %Array* }* %39, { %Array*, %Array* }** %45, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %36, %Tuple* %42, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %38, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %46 = add i64 %__qsVar4__idxOp__, %22 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + %47 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* 
%__qsVar3__unitaryFunction__, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon__MultiplexerFromGenerator__body(i64 %0, %Callable* %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %unitaryGenerator = bitcast %Tuple* %2 to { i64, %Callable* }* + %3 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %4 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + store i64 %0, i64* %3, align 4 + store %Callable* %1, %Callable** %4, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %5 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___4b6c2741135544ffade7e32677715e11_MultiplexOperationsFromGenerator__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { i64, %Callable* }* }* getelementptr ({ %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Callable*, { i64, %Callable* }* }* + %8 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %7, i32 0, i32 1 + store %Callable* %5, %Callable** %8, align 8 + store { i64, %Callable* }* %unitaryGenerator, { i64, %Callable* }** %9, align 8 + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__27__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__13__FunctionTable, %Tuple* %6) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret %Callable* %10 +} + +define internal void @Lifted__PartialApplication__27__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { { %Array* }*, %Array* }* + %4 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 0 + %5 = load { %Array* }*, { %Array* }** %4, align 8 + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* 
}*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %10 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 2 + store { i64, %Callable* }* %2, { i64, %Callable* }** %10, align 8 + store { %Array* }* %5, { %Array* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__27__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { { %Array* }*, %Array* }* + %4 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 0 + %5 = load { %Array* }*, { %Array* }** %4, align 8 + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %10 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 2 + store { i64, %Callable* }* %2, { i64, %Callable* }** %10, align 8 + store { %Array* }* %5, { %Array* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* 
%15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__27__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { %Array* }*, %Array* }*, { { %Array* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %6 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 1 + %7 = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8 + %8 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %9 = load { %Array* }*, { %Array* }** %8, align 8 + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %14 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 2 + store { i64, %Callable* }* %7, { i64, %Callable* }** %14, align 8 + store { %Array* }* %9, { %Array* }** %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, { { i64, %Callable* }*, { %Array* }*, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__27__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { %Array* }*, %Array* }*, { { %Array* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %6 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 1 + %7 = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8 + %8 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %9 = load { %Array* }*, { %Array* }** %8, align 8 + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %14 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 2 + store { i64, %Callable* }* %7, { i64, %Callable* }** %14, align 8 + store { %Array* }* %9, { %Array* }** %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, { { i64, %Callable* }*, { %Array* }*, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* 
@__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %23) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___4b6c2741135544ffade7e32677715e11_MultiplexOperationsFromGenerator__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load { %Array* }*, { %Array* }** %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___4b6c2741135544ffade7e32677715e11_MultiplexOperationsFromGenerator__body({ i64, %Callable* }* %4, { %Array* }* %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___4b6c2741135544ffade7e32677715e11_MultiplexOperationsFromGenerator__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load { %Array* }*, { %Array* }** %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___4b6c2741135544ffade7e32677715e11_MultiplexOperationsFromGenerator__adj({ i64, %Callable* }* %4, { %Array* }* %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___4b6c2741135544ffade7e32677715e11_MultiplexOperationsFromGenerator__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + 
%4 = load { { i64, %Callable* }*, { %Array* }*, %Array* }*, { { i64, %Callable* }*, { %Array* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___4b6c2741135544ffade7e32677715e11_MultiplexOperationsFromGenerator__ctl(%Array* %3, { { i64, %Callable* }*, { %Array* }*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___4b6c2741135544ffade7e32677715e11_MultiplexOperationsFromGenerator__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, { %Array* }*, %Array* }*, { { i64, %Callable* }*, { %Array* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___4b6c2741135544ffade7e32677715e11_MultiplexOperationsFromGenerator__ctladj(%Array* %3, { { i64, %Callable* }*, { %Array* }*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__13__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %4, i32 0, i32 1 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + %7 = bitcast { i64, %Callable* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__13__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %4, i32 0, i32 1 + %6 = load %Callable*, %Callable** %5, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + %7 = bitcast { i64, %Callable* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___4b6c2741135544ffade7e32677715e11_MultiplexOperationsFromGenerator__body({ i64, %Callable* }* %unitaryGenerator, { %Array* }* %index, %Array* %target) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %unitaryFunction = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %1 = bitcast { i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %4 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %nUnitaries = load i64, i64* %5, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %unitaryFunction, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64, %Callable* }* getelementptr ({ i64, i64, %Callable* }, { i64, i64, %Callable* }* null, i32 1) to i64)) + %unitaryGeneratorWithOffset = bitcast %Tuple* %6 to { i64, i64, %Callable* }* + %7 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %unitaryGeneratorWithOffset, i32 0, i32 0 + %8 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %unitaryGeneratorWithOffset, i32 0, i32 1 + %9 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %unitaryGeneratorWithOffset, i32 0, i32 2 + store i64 %nUnitaries, i64* %7, align 4 + store i64 0, i64* %8, align 4 + store %Callable* %unitaryFunction, %Callable** %9, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %10 = call i64 @__quantum__rt__array_get_size_1d(%Array* %3) + %11 = icmp eq i64 %10, 0 + br i1 %11, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @22, i32 0, i32 0)) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__fail(%String* %12) + unreachable + +continue__1: ; preds = %entry + %13 = icmp sgt i64 %nUnitaries, 0 + br i1 %13, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + %auxiliary = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 1) + call void @Microsoft__Quantum__Canon___c676ff1e4c9940fe9ce03435c793589a___QsRef3__MultiplexOperationsFromGeneratorImpl____adj({ i64, i64, %Callable* }* %unitaryGeneratorWithOffset, %Array* %auxiliary, { %Array* }* %index, %Array* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 -1) + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___4b6c2741135544ffade7e32677715e11_MultiplexOperationsFromGenerator__adj({ i64, %Callable* }* %unitaryGenerator, { %Array* }* %index, %Array* %target) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %__qsVar1__unitaryFunction__ = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 1) + 
call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 1) + %1 = bitcast { i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %4 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %__qsVar0__nUnitaries__ = load i64, i64* %5, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__unitaryFunction__, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64, %Callable* }* getelementptr ({ i64, i64, %Callable* }, { i64, i64, %Callable* }* null, i32 1) to i64)) + %__qsVar2__unitaryGeneratorWithOffset__ = bitcast %Tuple* %6 to { i64, i64, %Callable* }* + %7 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %__qsVar2__unitaryGeneratorWithOffset__, i32 0, i32 0 + %8 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %__qsVar2__unitaryGeneratorWithOffset__, i32 0, i32 1 + %9 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %__qsVar2__unitaryGeneratorWithOffset__, i32 0, i32 2 + store i64 %__qsVar0__nUnitaries__, i64* %7, align 4 + store i64 0, i64* %8, align 4 + store %Callable* %__qsVar1__unitaryFunction__, %Callable** %9, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %10 = call i64 @__quantum__rt__array_get_size_1d(%Array* %3) + %11 = icmp eq i64 %10, 0 + br i1 %11, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @22, i32 0, i32 0)) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__fail(%String* %12) + unreachable + +continue__1: ; preds = %entry + %13 = icmp sgt i64 %__qsVar0__nUnitaries__, 0 + br i1 %13, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + %__qsVar3__auxiliary__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar3__auxiliary__, i32 1) + call void @Microsoft__Quantum__Canon___c676ff1e4c9940fe9ce03435c793589a___QsRef3__MultiplexOperationsFromGeneratorImpl____body({ i64, i64, %Callable* }* %__qsVar2__unitaryGeneratorWithOffset__, %Array* %__qsVar3__auxiliary__, { %Array* }* %index, %Array* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar3__auxiliary__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar3__auxiliary__, i32 -1) + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___4b6c2741135544ffade7e32677715e11_MultiplexOperationsFromGenerator__ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %unitaryGenerator = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %unitaryFunction = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %3 = bitcast { i64, %Callable* }* %unitaryGenerator to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %index = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %9 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %nUnitaries = load i64, i64* %9, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %unitaryFunction, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64, %Callable* }* getelementptr ({ i64, i64, %Callable* }, { i64, i64, %Callable* }* null, i32 1) to i64)) + %unitaryGeneratorWithOffset = bitcast %Tuple* %10 to { i64, i64, %Callable* }* + %11 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %unitaryGeneratorWithOffset, i32 0, i32 0 + %12 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %unitaryGeneratorWithOffset, i32 0, i32 1 + %13 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %unitaryGeneratorWithOffset, i32 0, i32 2 + store i64 %nUnitaries, i64* %11, align 4 + store i64 0, i64* %12, align 4 + store %Callable* %unitaryFunction, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + %14 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %15 = icmp eq i64 %14, 0 + br i1 %15, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %16 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @22, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__fail(%String* %16) + unreachable + +continue__1: ; preds = %entry + %17 = icmp sgt i64 %nUnitaries, 0 + br i1 %17, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + %auxiliary = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* getelementptr ({ { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* + %20 = getelementptr inbounds { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %19, i32 0, i32 3 + store { i64, i64, %Callable* }* %unitaryGeneratorWithOffset, { i64, i64, %Callable* }** %20, align 8 + store %Array* %auxiliary, %Array** %21, align 8 + store { %Array* }* %index, { %Array* }** %22, align 8 + store %Array* %target, %Array** %23, align 8 + call void @Microsoft__Quantum__Canon___c676ff1e4c9940fe9ce03435c793589a___QsRef3__MultiplexOperationsFromGeneratorImpl____ctladj(%Array* %__controlQubits__, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %19) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___4b6c2741135544ffade7e32677715e11_MultiplexOperationsFromGenerator__ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %unitaryGenerator = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %__qsVar1__unitaryFunction__ = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 1) + %3 = bitcast { i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %index = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %9 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + 
%__qsVar0__nUnitaries__ = load i64, i64* %9, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__unitaryFunction__, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64, %Callable* }* getelementptr ({ i64, i64, %Callable* }, { i64, i64, %Callable* }* null, i32 1) to i64)) + %__qsVar2__unitaryGeneratorWithOffset__ = bitcast %Tuple* %10 to { i64, i64, %Callable* }* + %11 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %__qsVar2__unitaryGeneratorWithOffset__, i32 0, i32 0 + %12 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %__qsVar2__unitaryGeneratorWithOffset__, i32 0, i32 1 + %13 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %__qsVar2__unitaryGeneratorWithOffset__, i32 0, i32 2 + store i64 %__qsVar0__nUnitaries__, i64* %11, align 4 + store i64 0, i64* %12, align 4 + store %Callable* %__qsVar1__unitaryFunction__, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + %14 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %15 = icmp eq i64 %14, 0 + br i1 %15, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %16 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @22, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__fail(%String* %16) + unreachable + +continue__1: ; preds = %entry + %17 = icmp sgt i64 %__qsVar0__nUnitaries__, 0 + br i1 %17, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + %__qsVar3__auxiliary__ = call %Array* 
@__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar3__auxiliary__, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__unitaryFunction__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar3__auxiliary__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* getelementptr ({ { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* + %20 = getelementptr inbounds { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %19, i32 0, i32 3 + store { i64, i64, %Callable* }* %__qsVar2__unitaryGeneratorWithOffset__, { i64, i64, %Callable* }** %20, align 8 + store %Array* %__qsVar3__auxiliary__, %Array** %21, align 8 + store { %Array* }* %index, { %Array* }** %22, align 8 + store %Array* %target, %Array** %23, align 8 + call void @Microsoft__Quantum__Canon___c676ff1e4c9940fe9ce03435c793589a___QsRef3__MultiplexOperationsFromGeneratorImpl____ctl(%Array* %__controlQubits__, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %19) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar3__auxiliary__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar3__auxiliary__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar3__auxiliary__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__RAll0__body(double %phase, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable* }, { %Callable*, %Callable* }* null, i32 1) to i64)) + %3 = bitcast %Tuple* %2 to { %Callable*, %Callable* }* + %4 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %3, i32 0, i32 0 + %5 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %3, i32 0, i32 1 + store %Callable* %0, %Callable** %4, align 8 + store %Callable* %1, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__28__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__14__FunctionTable, %Tuple* %2) + %7 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__RAll1__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, double }* + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store double %phase, double* %11, align 8 + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__29__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__15__FunctionTable, %Tuple* %8) + call void 
@Microsoft__Quantum__Canon___b8eceb214cf14baa8c0de4f62e56342e_ApplyWithCA__body(%Callable* %6, %Callable* %12, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___b8eceb214cf14baa8c0de4f62e56342e_ApplyWithCA__body(%Callable* %outerOperation, %Callable* %innerOperation, %Array* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Array* }* + %2 = getelementptr inbounds { %Array* }, { %Array* }* %1, i32 0, i32 0 + store %Array* %target, %Array** %2, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %outerOperation, %Tuple* %0, %Tuple* null) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array* }* + %5 = getelementptr inbounds { %Array* }, { %Array* }* %4, i32 0, i32 0 + store %Array* %target, %Array** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %innerOperation, %Tuple* %3, %Tuple* null) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array* }* + %9 = getelementptr inbounds { %Array* }, { %Array* }* %8, i32 0, i32 0 + store %Array* %target, %Array** %9, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__28__body__wrapper(%Tuple* %capture-tuple, %Tuple* 
%arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Callable*, %Array* }* + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %7, i32 0, i32 1 + store %Callable* %2, %Callable** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__28__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Callable*, %Array* }* + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %7, i32 0, i32 1 + store %Callable* %2, %Callable** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__28__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* 
%capture-tuple to { %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Array* }* + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, %Array* }* }* getelementptr ({ %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Callable*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Callable*, %Array* }* %9, { %Callable*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__28__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Array* }* + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, %Array* }* }* getelementptr ({ %Array*, { %Callable*, %Array* }* 
}, { %Array*, { %Callable*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Callable*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Callable*, %Array* }* %9, { %Callable*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %3 = load %Callable*, %Callable** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__body(%Callable* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %3 = load %Callable*, %Callable** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__adj(%Callable* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Callable*, %Array* }*, { %Callable*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__ctl(%Array* %3, { %Callable*, %Array* }* %4) + ret void +} + +define internal void 
@Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Callable*, %Array* }*, { %Callable*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__ctladj(%Array* %3, { %Callable*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__14__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__14__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__29__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array* }* + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array* }, { 
double, %Array* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__29__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array* }* + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__29__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { 
double, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Array* }* %9, { double, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__29__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Array* }* %9, { double, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* 
%8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__RAll1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %1 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Canon__RAll1__body(double %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__RAll1__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %1 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Canon__RAll1__adj(double %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__RAll1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon__RAll1__ctl(%Array* %3, { double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__RAll1__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon__RAll1__ctladj(%Array* %3, { double, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__15__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__15__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { 
%Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon__RAll1__body(double %phase, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call { %Qubit*, %Array* }* @Microsoft__Quantum__Arrays___775dd5af95874d5da5182ed798734ac2_HeadAndRest__body(%Array* %qubits) + %1 = getelementptr inbounds { %Qubit*, %Array* }, { %Qubit*, %Array* }* %0, i32 0, i32 0 + %flagQubit = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Array* }, { %Qubit*, %Array* }* %0, i32 0, i32 1 + %systemRegister = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %systemRegister, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__R1__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + store %Callable* %3, %Callable** %6, align 8 + store double %phase, double* %7, align 8 + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__36__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__16__FunctionTable, %Tuple* %4) + call void @__quantum__rt__callable_make_controlled(%Callable* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %systemRegister, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Array*, %Qubit* }* + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %10, i32 0, i32 1 + store %Array* %systemRegister, %Array** %11, align 8 + store %Qubit* %flagQubit, %Qubit** %12, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %9, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %systemRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %systemRegister, i32 -1) + %13 = bitcast { %Qubit*, %Array* }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %systemRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__RAll1__adj(double 
%phase, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call { %Qubit*, %Array* }* @Microsoft__Quantum__Arrays___775dd5af95874d5da5182ed798734ac2_HeadAndRest__body(%Array* %qubits) + %1 = getelementptr inbounds { %Qubit*, %Array* }, { %Qubit*, %Array* }* %0, i32 0, i32 0 + %__qsVar0__flagQubit__ = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Array* }, { %Qubit*, %Array* }* %0, i32 0, i32 1 + %__qsVar1__systemRegister__ = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__systemRegister__, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__R1__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + store %Callable* %3, %Callable** %6, align 8 + store double %phase, double* %7, align 8 + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__37__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__16__FunctionTable, %Tuple* %4) + call void @__quantum__rt__callable_make_controlled(%Callable* %8) + call void @__quantum__rt__callable_make_adjoint(%Callable* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__systemRegister__, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Array*, %Qubit* }* + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %10, i32 0, i32 1 + store %Array* %__qsVar1__systemRegister__, %Array** %11, align 8 + store %Qubit* %__qsVar0__flagQubit__, %Qubit** %12, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %9, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__systemRegister__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__systemRegister__, i32 -1) + %13 = bitcast { %Qubit*, %Array* }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__systemRegister__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__RAll1__ctl(%Array* %__controlQubits__, { double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %0, i32 0, i32 0 + %phase = load double, double* %1, align 8 + %2 = getelementptr 
inbounds { double, %Array* }, { double, %Array* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = call { %Qubit*, %Array* }* @Microsoft__Quantum__Arrays___775dd5af95874d5da5182ed798734ac2_HeadAndRest__body(%Array* %qubits) + %4 = getelementptr inbounds { %Qubit*, %Array* }, { %Qubit*, %Array* }* %3, i32 0, i32 0 + %flagQubit = load %Qubit*, %Qubit** %4, align 8 + %5 = getelementptr inbounds { %Qubit*, %Array* }, { %Qubit*, %Array* }* %3, i32 0, i32 1 + %systemRegister = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %systemRegister, i32 1) + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__R1__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Callable*, double }* + %9 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %8, i32 0, i32 1 + store %Callable* %6, %Callable** %9, align 8 + store double %phase, double* %10, align 8 + %11 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__38__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__16__FunctionTable, %Tuple* %7) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %systemRegister, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Qubit* }* + %14 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 1 + store %Array* %systemRegister, %Array** %14, align 8 + store %Qubit* %flagQubit, %Qubit** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { %Array*, %Qubit* }* }* + %18 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { %Array*, %Qubit* }* %13, { %Array*, %Qubit* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %16, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %systemRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %systemRegister, i32 -1) + %20 = bitcast { %Qubit*, %Array* 
}* %3 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %systemRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__RAll1__ctladj(%Array* %__controlQubits__, { double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %0, i32 0, i32 0 + %phase = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = call { %Qubit*, %Array* }* @Microsoft__Quantum__Arrays___775dd5af95874d5da5182ed798734ac2_HeadAndRest__body(%Array* %qubits) + %4 = getelementptr inbounds { %Qubit*, %Array* }, { %Qubit*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__flagQubit__ = load %Qubit*, %Qubit** %4, align 8 + %5 = getelementptr inbounds { %Qubit*, %Array* }, { %Qubit*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__systemRegister__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__systemRegister__, i32 1) + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__R1__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Callable*, double }* + %9 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %8, i32 0, i32 1 + store %Callable* %6, %Callable** %9, align 8 + store double %phase, double* %10, align 8 + %11 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__39__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__16__FunctionTable, %Tuple* %7) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_adjoint(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__systemRegister__, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Qubit* }* + %14 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 1 + store %Array* %__qsVar1__systemRegister__, %Array** %14, align 8 + store %Qubit* %__qsVar0__flagQubit__, %Qubit** 
%15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { %Array*, %Qubit* }* }* + %18 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { %Array*, %Qubit* }* %13, { %Array*, %Qubit* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %16, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__systemRegister__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__systemRegister__, i32 -1) + %20 = bitcast { %Qubit*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__systemRegister__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__RAll0__adj(double %phase, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable* }, { %Callable*, %Callable* }* null, i32 1) to i64)) + %3 = bitcast %Tuple* %2 to { %Callable*, %Callable* }* + %4 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %3, i32 0, i32 0 + %5 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %3, i32 0, i32 1 + store %Callable* %0, %Callable** %4, align 8 + store %Callable* %1, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__30__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__14__FunctionTable, %Tuple* %2) + %7 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__RAll1__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, 
double }* + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store double %phase, double* %11, align 8 + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__31__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__15__FunctionTable, %Tuple* %8) + call void @Microsoft__Quantum__Canon___b8eceb214cf14baa8c0de4f62e56342e_ApplyWithCA__adj(%Callable* %6, %Callable* %12, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___b8eceb214cf14baa8c0de4f62e56342e_ApplyWithCA__adj(%Callable* %outerOperation, %Callable* %innerOperation, %Array* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %0 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %0) + call void @__quantum__rt__callable_make_adjoint(%Callable* %0) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Array* }* + %3 = getelementptr inbounds { %Array* }, { %Array* }* %2, i32 0, i32 0 + store %Array* %target, %Array** %3, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %0, %Tuple* %1, %Tuple* null) + %4 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Array* }* + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + store %Array* %target, %Array** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %5, %Tuple* null) + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %8) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Array* }* + %11 = getelementptr inbounds { %Array* }, { %Array* }* %10, i32 0, i32 0 + store %Array* %target, %Array** %11, align 8 + call 
void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %9, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__30__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Callable*, %Array* }* + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %7, i32 0, i32 1 + store %Callable* %2, %Callable** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__30__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Callable*, %Array* }* + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Callable*, 
%Array* }, { %Callable*, %Array* }* %7, i32 0, i32 1 + store %Callable* %2, %Callable** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__30__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Array* }* + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, %Array* }* }* getelementptr ({ %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Callable*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Callable*, %Array* }* %9, { %Callable*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret 
void +} + +define internal void @Lifted__PartialApplication__30__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Array* }* + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, %Array* }* }* getelementptr ({ %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Callable*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Callable*, %Array* }* %9, { %Callable*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__31__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array* }* + %8 = getelementptr inbounds { double, %Array* }, { double, 
%Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__31__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array* }* + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__31__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { 
double, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Array* }* %9, { double, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__31__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Array* }* %9, { double, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, 
%Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__RAll0__ctl(%Array* %__controlQubits__, { double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %0, i32 0, i32 0 + %phase = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable* }, { %Callable*, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Callable* }* + %7 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %6, i32 0, i32 1 + store %Callable* %3, %Callable** %7, align 8 + store %Callable* %4, %Callable** %8, align 8 + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__32__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__14__FunctionTable, %Tuple* %5) + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__RAll1__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Callable*, double }* + %13 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %12, i32 0, i32 1 + store %Callable* %10, %Callable** %13, align 8 + store double %phase, double* %14, align 8 + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__33__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__15__FunctionTable, %Tuple* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Array* }* getelementptr ({ %Callable*, %Callable*, %Array* }, { %Callable*, %Callable*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Callable*, %Callable*, %Array* }* + %18 = getelementptr inbounds { %Callable*, %Callable*, %Array* }, { %Callable*, %Callable*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { 
%Callable*, %Callable*, %Array* }, { %Callable*, %Callable*, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { %Callable*, %Callable*, %Array* }, { %Callable*, %Callable*, %Array* }* %17, i32 0, i32 2 + store %Callable* %9, %Callable** %18, align 8 + store %Callable* %15, %Callable** %19, align 8 + store %Array* %qubits, %Array** %20, align 8 + call void @Microsoft__Quantum__Canon___b8eceb214cf14baa8c0de4f62e56342e_ApplyWithCA__ctl(%Array* %__controlQubits__, { %Callable*, %Callable*, %Array* }* %17) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___b8eceb214cf14baa8c0de4f62e56342e_ApplyWithCA__ctl(%Array* %controlRegister, { %Callable*, %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { %Callable*, %Callable*, %Array* }, { %Callable*, %Callable*, %Array* }* %0, i32 0, i32 0 + %outerOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Callable*, %Array* }, { %Callable*, %Callable*, %Array* }* %0, i32 0, i32 1 + %innerOperation = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Array* }, { %Callable*, %Callable*, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + store %Array* %target, %Array** %6, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %outerOperation, %Tuple* %4, %Tuple* null) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = 
getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Array* %target, %Array** %11, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %8, %Tuple* null) + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Array* }* + %15 = getelementptr inbounds { %Array* }, { %Array* }* %14, i32 0, i32 0 + store %Array* %target, %Array** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %13, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__32__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Callable*, %Array* }* + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %7, i32 0, i32 1 + store %Callable* %2, %Callable** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 
-1) + ret void +} + +define internal void @Lifted__PartialApplication__32__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Callable*, %Array* }* + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %7, i32 0, i32 1 + store %Callable* %2, %Callable** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__32__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Array* }* + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, %Array* }* }* getelementptr ({ %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Callable*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %13, i32 0, i32 1 + store 
%Array* %3, %Array** %14, align 8 + store { %Callable*, %Array* }* %9, { %Callable*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__32__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Array* }* + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, %Array* }* }* getelementptr ({ %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Callable*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Callable*, %Array* }* %9, { %Callable*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__33__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array* }* + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__33__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array* }* + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__33__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, 
%Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Array* }* %9, { double, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__33__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* 
null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Array* }* %9, { double, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__RAll0__ctladj(%Array* %__controlQubits__, { double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %0, i32 0, i32 0 + %phase = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___45b59a51e4694638a6ed8a754a6e18f6_ApplyToEachCA__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable* }, { %Callable*, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Callable* }* + %7 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %6, i32 0, i32 1 + store %Callable* %3, %Callable** %7, align 8 + store %Callable* %4, %Callable** %8, align 8 + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__34__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__14__FunctionTable, %Tuple* %5) + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__RAll1__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Callable*, double }* + %13 = getelementptr inbounds { %Callable*, double }, { %Callable*, 
double }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %12, i32 0, i32 1 + store %Callable* %10, %Callable** %13, align 8 + store double %phase, double* %14, align 8 + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__35__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__15__FunctionTable, %Tuple* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Array* }* getelementptr ({ %Callable*, %Callable*, %Array* }, { %Callable*, %Callable*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Callable*, %Callable*, %Array* }* + %18 = getelementptr inbounds { %Callable*, %Callable*, %Array* }, { %Callable*, %Callable*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Callable*, %Callable*, %Array* }, { %Callable*, %Callable*, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { %Callable*, %Callable*, %Array* }, { %Callable*, %Callable*, %Array* }* %17, i32 0, i32 2 + store %Callable* %9, %Callable** %18, align 8 + store %Callable* %15, %Callable** %19, align 8 + store %Array* %qubits, %Array** %20, align 8 + call void @Microsoft__Quantum__Canon___b8eceb214cf14baa8c0de4f62e56342e_ApplyWithCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Callable*, %Array* }* %17) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___b8eceb214cf14baa8c0de4f62e56342e_ApplyWithCA__ctladj(%Array* %controlRegister, { %Callable*, %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { %Callable*, %Callable*, %Array* }, { %Callable*, %Callable*, %Array* }* %0, i32 0, i32 0 + %outerOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Callable*, %Array* }, { %Callable*, %Callable*, %Array* }* %0, i32 0, i32 1 + %innerOperation = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Array* }, { %Callable*, %Callable*, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %4 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 
1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Array* }* + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + store %Array* %target, %Array** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %5, %Tuple* null) + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %8) + call void @__quantum__rt__callable_make_adjoint(%Callable* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Array*, %Array* }* + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %10, i32 0, i32 1 + store %Array* %controlRegister, %Array** %11, align 8 + store %Array* %target, %Array** %12, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %9, %Tuple* null) + %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %13) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + store %Array* %target, %Array** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %13, %Tuple* %14, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__34__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Callable*, %Array* }* + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %7, i32 0, i32 1 + store %Callable* %2, %Callable** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__34__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Callable*, %Array* }* + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %7, i32 0, i32 1 + store %Callable* %2, %Callable** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__34__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr 
inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Array* }* + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, %Array* }* }* getelementptr ({ %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Callable*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Callable*, %Array* }* %9, { %Callable*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__34__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Array* }* + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, 
%Array* }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, %Array* }* }* getelementptr ({ %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Callable*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Callable*, %Array* }* }, { %Array*, { %Callable*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Callable*, %Array* }* %9, { %Callable*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__35__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array* }* + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__35__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ 
double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array* }* + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__35__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Array* }* %9, { double, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__35__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Array* }* %9, { double, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal { %Qubit*, %Array* }* @Microsoft__Quantum__Arrays___775dd5af95874d5da5182ed798734ac2_HeadAndRest__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call %Qubit* @Microsoft__Quantum__Arrays___af5d1f5b3fc545fd94571101b9dee3d5_Head__body(%Array* %array) + %1 = call %Array* @Microsoft__Quantum__Arrays___8eccbbf2f2c44c66bcf118fa86e46f90_Rest__body(%Array* %array) + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Array* }* getelementptr ({ %Qubit*, %Array* }, { %Qubit*, %Array* }* null, i32 1) to i64)) + %3 = bitcast %Tuple* %2 to { %Qubit*, %Array* }* + %4 = getelementptr inbounds { %Qubit*, %Array* }, { %Qubit*, %Array* }* %3, 
i32 0, i32 0 + %5 = getelementptr inbounds { %Qubit*, %Array* }, { %Qubit*, %Array* }* %3, i32 0, i32 1 + store %Qubit* %0, %Qubit** %4, align 8 + store %Array* %1, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret { %Qubit*, %Array* }* %3 +} + +define internal void @Lifted__PartialApplication__36__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__36__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__36__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr 
inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Qubit* }* + %10 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Qubit* }* }* getelementptr ({ %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Qubit* }* %9, { double, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__36__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Qubit* }* + %10 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 
ptrtoint ({ %Array*, { double, %Qubit* }* }* getelementptr ({ %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Qubit* }* %9, { double, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__R1__body(double %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R1__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__R1__adj(double %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__R1__ctl(%Array* %3, { double, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R1__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, 
i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__R1__ctladj(%Array* %3, { double, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__16__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__16__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__37__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__37__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) 
to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__37__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Qubit* }* + %10 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Qubit* }* }* getelementptr ({ %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Qubit* }* %9, { double, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__37__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Qubit* }* + %10 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Qubit* }* }* getelementptr ({ %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Qubit* }* %9, { double, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__38__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = 
getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__38__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__38__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Qubit* }* + %10 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Qubit* }* }* getelementptr ({ %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* null, i32 1) to 
i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Qubit* }* %9, { double, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__38__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Qubit* }* + %10 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Qubit* }* }* getelementptr ({ %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Qubit* }* %9, { double, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__39__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__39__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__39__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* 
}* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Qubit* }* + %10 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Qubit* }* }* getelementptr ({ %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Qubit* }* %9, { double, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__39__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Qubit* }* + %10 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Qubit* }* }* getelementptr ({ %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Qubit* }* %9, { double, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____body({ i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = sub i64 %nSteps, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %4 = icmp sle i64 %idx, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, double, %Array* }* + %7 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %6, i32 0, i32 2 + store i64 %idx, i64* %7, align 4 + store double %stepSize, double* %8, align 8 + store %Array* %target, %Array** %9, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %5, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void 
@__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____adj({ i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = sub i64 %nSteps, 1 + %4 = sub i64 %3, 0 + %5 = sdiv i64 %4, 1 + %6 = mul i64 1, %5 + %7 = add i64 0, %6 + %8 = insertvalue %Range zeroinitializer, i64 %7, 0 + %9 = insertvalue %Range %8, i64 -1, 1 + %10 = insertvalue %Range %9, i64 0, 2 + %11 = extractvalue %Range %10, 0 + %12 = extractvalue %Range %10, 1 + %13 = extractvalue %Range %10, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %14 = icmp sgt i64 %12, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idx__ = phi i64 [ %11, %preheader__1 ], [ %24, %exiting__1 ] + %15 = icmp sle i64 %__qsVar0__idx__, %13 + %16 = icmp sge i64 %__qsVar0__idx__, %13 + %17 = select i1 %14, i1 %15, i1 %16 + br i1 %17, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { i64, double, %Array* }* + %21 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %20, i32 0, i32 1 + %23 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %20, i32 0, i32 2 + store i64 %__qsVar0__idx__, i64* %21, align 4 + store double %stepSize, double* %22, align 8 + store %Array* %target, %Array** %23, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %19, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %24 = add i64 %__qsVar0__idx__, %12 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %3, align 8 + %4 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 0 + %nSteps = load i64, i64* %5, align 4 + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 1 + %op = load %Callable*, %Callable** %6, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %7 = sub i64 %nSteps, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %19, %exiting__1 ] + %8 = icmp sle i64 %idx, %7 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %9) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, double, %Array* }* + %12 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %11, i32 0, i32 2 + store i64 %idx, i64* %12, align 4 + store double %stepSize, double* %13, align 8 + store %Array* %target, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { i64, double, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %16, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %17, align 8 + store { i64, double, %Array* }* %11, { i64, double, %Array* }** %18, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %15, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %3, align 8 + %4 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 0 + %nSteps = load i64, i64* %5, align 4 + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 1 + %op = load %Callable*, %Callable** %6, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %7 = sub i64 %nSteps, 1 + %8 = sub i64 %7, 0 + %9 = sdiv i64 %8, 1 + %10 = mul i64 1, %9 + %11 = add i64 0, %10 + %12 = insertvalue %Range zeroinitializer, i64 %11, 0 + %13 = insertvalue %Range %12, i64 -1, 1 + %14 = insertvalue %Range %13, i64 0, 2 + %15 = extractvalue %Range %14, 0 + %16 = extractvalue %Range %14, 1 + %17 = extractvalue %Range %14, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %18 = icmp sgt i64 %16, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idx__ = phi i64 [ %15, %preheader__1 ], [ %32, %exiting__1 ] + %19 = icmp sle i64 %__qsVar0__idx__, %17 + %20 = icmp sge i64 %__qsVar0__idx__, %17 + %21 = select i1 %18, i1 %19, i1 %20 + br i1 %21, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 
ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { i64, double, %Array* }* + %25 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %24, i32 0, i32 1 + %27 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %24, i32 0, i32 2 + store i64 %__qsVar0__idx__, i64* %25, align 4 + store double %stepSize, double* %26, align 8 + store %Array* %target, %Array** %27, align 8 + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { %Array*, { i64, double, %Array* }* }* + %30 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %29, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %30, align 8 + store { i64, double, %Array* }* %24, { i64, double, %Array* }** %31, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %28, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %32 = add i64 %__qsVar0__idx__, %16 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___c676ff1e4c9940fe9ce03435c793589a___QsRef3__MultiplexOperationsFromGeneratorImpl____adj({ i64, i64, %Callable* }* %unitaryGenerator, %Array* %auxiliary, { %Array* }* %index, %Array* %target) { +entry: + %0 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %unitaryGenerator, i32 0, i32 2 + %__qsVar4__unitaryFunction__ = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar4__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar4__unitaryFunction__, i32 1) + %1 = bitcast { i64, i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 1) + %2 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %4 = bitcast { %Array* }* %index to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %__qsVar0__nIndex__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %3) + %5 = trunc i64 %__qsVar0__nIndex__ to i32 + %6 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %5) + %__qsVar1__nStates__ = fptosi double %6 to i64 + %7 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %__qsVar2__nUnitaries__ = load i64, i64* %7, align 4 + %8 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %__qsVar3__unitaryOffset__ = load i64, i64* %8, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar4__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar4__unitaryFunction__, i32 1) + %9 = sdiv i64 %__qsVar1__nStates__, 2 + %__qsVar5__nUnitariesLeft__ = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %__qsVar2__nUnitaries__, i64 %9) + %__qsVar6__nUnitariesRight__ = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %__qsVar2__nUnitaries__, i64 %__qsVar1__nStates__) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar4__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar4__unitaryFunction__, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64, %Callable* }* getelementptr ({ i64, i64, %Callable* }, { i64, i64, %Callable* }* null, i32 1) to i64)) + %__qsVar7__leftUnitaries__ = bitcast %Tuple* %10 to { i64, i64, %Callable* }* + %11 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %__qsVar7__leftUnitaries__, i32 0, i32 0 + %12 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %__qsVar7__leftUnitaries__, i32 0, i32 1 + %13 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %__qsVar7__leftUnitaries__, i32 0, i32 2 + store i64 %__qsVar5__nUnitariesLeft__, i64* %11, align 4 + store i64 %__qsVar3__unitaryOffset__, i64* %12, align 4 + store %Callable* %__qsVar4__unitaryFunction__, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar4__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar4__unitaryFunction__, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + %14 = sub i64 %__qsVar6__nUnitariesRight__, %__qsVar5__nUnitariesLeft__ + %15 = add i64 %__qsVar3__unitaryOffset__, %__qsVar5__nUnitariesLeft__ + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar4__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar4__unitaryFunction__, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64, %Callable* }* getelementptr ({ i64, i64, %Callable* }, { i64, i64, %Callable* }* null, i32 1) to i64)) + %__qsVar8__rightUnitaries__ = bitcast %Tuple* %16 to { i64, i64, %Callable* }* + %17 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %__qsVar8__rightUnitaries__, i32 0, i32 0 + %18 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %__qsVar8__rightUnitaries__, i32 0, i32 1 + %19 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %__qsVar8__rightUnitaries__, i32 0, i32 2 + store i64 %14, i64* %17, 
align 4 + store i64 %15, i64* %18, align 4 + store %Callable* %__qsVar4__unitaryFunction__, %Callable** %19, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar4__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar4__unitaryFunction__, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + %20 = call %Array* @Microsoft__Quantum__Arrays___11e60deb067d435786055b3275e916c8_Most__body(%Array* %3) + %__qsVar9__newControls__ = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %20) + %21 = getelementptr inbounds { %Array* }, { %Array* }* %__qsVar9__newControls__, i32 0, i32 0 + %22 = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %22, i32 1) + %23 = bitcast { %Array* }* %__qsVar9__newControls__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %23, i32 1) + %24 = icmp sgt i64 %__qsVar2__nUnitaries__, 0 + br i1 %24, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %25 = call i64 @__quantum__rt__array_get_size_1d(%Array* %auxiliary) + %26 = icmp eq i64 %25, 1 + br i1 %26, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %then0__1 + %27 = icmp eq i64 %__qsVar0__nIndex__, 0 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %then0__1 + %28 = phi i1 [ %27, %condTrue__1 ], [ %26, %then0__1 ] + br i1 %28, label %then0__2, label %test1__1 + +then0__2: ; preds = %condContinue__1 + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %30 = bitcast %Tuple* %29 to { i64 }* + %31 = getelementptr inbounds { i64 }, { i64 }* %30, i32 0, i32 0 + store i64 %__qsVar3__unitaryOffset__, i64* %31, align 4 + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %__qsVar4__unitaryFunction__, %Tuple* %29, %Tuple* %32) + %33 = bitcast %Tuple* %32 to { %Callable* }* + %34 = getelementptr inbounds { %Callable* }, { %Callable* }* %33, i32 0, i32 0 + %35 = load %Callable*, %Callable** %34, align 8 + %36 = call %Callable* @__quantum__rt__callable_copy(%Callable* %35, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %36) + call void @__quantum__rt__callable_make_controlled(%Callable* %36) + call void @__quantum__rt__callable_make_adjoint(%Callable* %36) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %37 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %38 = bitcast %Tuple* %37 to { %Array*, %Array* }* + %39 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %38, i32 0, i32 0 + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %38, i32 0, i32 1 + store %Array* %auxiliary, %Array** %39, align 8 + store %Array* %target, %Array** %40, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %36, %Tuple* %37, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %35, 
i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1) + br label %continue__2 + +test1__1: ; preds = %condContinue__1 + %41 = call i64 @__quantum__rt__array_get_size_1d(%Array* %auxiliary) + %42 = icmp eq i64 %41, 0 + br i1 %42, label %condTrue__2, label %condContinue__2 + +condTrue__2: ; preds = %test1__1 + %43 = icmp sge i64 %__qsVar0__nIndex__, 1 + br label %condContinue__2 + +condContinue__2: ; preds = %condTrue__2, %test1__1 + %44 = phi i1 [ %43, %condTrue__2 ], [ %42, %test1__1 ] + br i1 %44, label %then1__1, label %else__1 + +then1__1: ; preds = %condContinue__2 + %__qsVar10__newauxiliary__ = call %Qubit* @Microsoft__Quantum__Arrays___9d7834b9a7ce4d8cb1a6bd58bdfd2736_Tail__body(%Array* %3) + call void @__quantum__qis__x__body(%Qubit* %__qsVar10__newauxiliary__) + %45 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %45, i64 0) + %47 = bitcast i8* %46 to %Qubit** + store %Qubit* %__qsVar10__newauxiliary__, %Qubit** %47, align 8 + call void @Microsoft__Quantum__Canon___c676ff1e4c9940fe9ce03435c793589a___QsRef3__MultiplexOperationsFromGeneratorImpl____adj({ i64, i64, %Callable* }* %__qsVar7__leftUnitaries__, %Array* %45, { %Array* }* %__qsVar9__newControls__, %Array* %target) + call void @__quantum__qis__x__body(%Qubit* %__qsVar10__newauxiliary__) + %48 = icmp sgt i64 %__qsVar6__nUnitariesRight__, 0 + br i1 %48, label %then0__3, label %continue__3 + +then0__3: ; preds = %then1__1 + %49 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 0) + %51 = bitcast i8* %50 to %Qubit** + store %Qubit* %__qsVar10__newauxiliary__, %Qubit** %51, align 8 + call void @Microsoft__Quantum__Canon___c676ff1e4c9940fe9ce03435c793589a___QsRef3__MultiplexOperationsFromGeneratorImpl____adj({ i64, i64, %Callable* }* %__qsVar8__rightUnitaries__, %Array* %49, { %Array* }* %__qsVar9__newControls__, %Array* %target) + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %then1__1 + call void @__quantum__rt__array_update_reference_count(%Array* %45, i32 -1) + br label %continue__2 + +else__1: ; preds = %condContinue__2 + %52 = call %Qubit* @Microsoft__Quantum__Arrays___9d7834b9a7ce4d8cb1a6bd58bdfd2736_Tail__body(%Array* %3) + %53 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 0) + %55 = bitcast i8* %54 to %Qubit** + store %Qubit* %52, %Qubit** %55, align 8 + %__qsVar11__controls__ = call %Array* @__quantum__rt__array_concatenate(%Array* %53, %Array* %auxiliary) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar11__controls__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar11__controls__, i32 1) + %__qsVar12__newauxiliary__ = call %Qubit* @__quantum__rt__qubit_allocate() + %56 = call i64 
@__quantum__rt__array_get_size_1d(%Array* %__qsVar11__controls__) + %57 = sub i64 %56, 2 + %58 = call i64 @Microsoft__Quantum__Math__MaxI__body(i64 0, i64 %57) + %__qsVar13__andauxiliary__ = call %Array* @__quantum__rt__qubit_allocate_array(i64 %58) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar13__andauxiliary__, i32 1) + call void @Microsoft__Quantum__Canon____QsRef3__ApplyAndChain____body(%Array* %__qsVar13__andauxiliary__, %Array* %__qsVar11__controls__, %Qubit* %__qsVar12__newauxiliary__) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 1) + call void @__quantum__qis__x__ctl(%Array* %auxiliary, %Qubit* %__qsVar12__newauxiliary__) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 -1) + %59 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %59, i64 0) + %61 = bitcast i8* %60 to %Qubit** + store %Qubit* %__qsVar12__newauxiliary__, %Qubit** %61, align 8 + call void @Microsoft__Quantum__Canon___c676ff1e4c9940fe9ce03435c793589a___QsRef3__MultiplexOperationsFromGeneratorImpl____adj({ i64, i64, %Callable* }* %__qsVar7__leftUnitaries__, %Array* %59, { %Array* }* %__qsVar9__newControls__, %Array* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 1) + call void @__quantum__qis__x__ctl(%Array* %auxiliary, %Qubit* %__qsVar12__newauxiliary__) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 -1) + %62 = icmp sgt i64 %__qsVar6__nUnitariesRight__, 0 + br i1 %62, label %then0__4, label %continue__4 + +then0__4: ; preds = %else__1 + %63 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %63, i64 0) + %65 = bitcast i8* %64 to %Qubit** + store %Qubit* %__qsVar12__newauxiliary__, %Qubit** %65, align 8 + call void @Microsoft__Quantum__Canon___c676ff1e4c9940fe9ce03435c793589a___QsRef3__MultiplexOperationsFromGeneratorImpl____adj({ i64, i64, %Callable* }* %__qsVar8__rightUnitaries__, %Array* %63, { %Array* }* %__qsVar9__newControls__, %Array* %target) + call void @__quantum__rt__array_update_reference_count(%Array* %63, i32 -1) + br label %continue__4 + +continue__4: ; preds = %then0__4, %else__1 + call void @Microsoft__Quantum__Canon____QsRef3__ApplyAndChain____adj(%Array* %__qsVar13__andauxiliary__, %Array* %__qsVar11__controls__, %Qubit* %__qsVar12__newauxiliary__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar13__andauxiliary__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %59, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %__qsVar13__andauxiliary__) + call void @__quantum__rt__qubit_release(%Qubit* %__qsVar12__newauxiliary__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar11__controls__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar11__controls__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar11__controls__, i32 -1) + br label %continue__2 + +continue__2: ; preds = %continue__4, %continue__3, %then0__2 + br label %continue__1 + +continue__1: ; preds = %continue__2, %entry + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar4__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar4__unitaryFunction__, i32 -1) + 
call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar4__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar4__unitaryFunction__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar4__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar4__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar4__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar4__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %22, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar4__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar4__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar4__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar4__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %22, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___c676ff1e4c9940fe9ce03435c793589a___QsRef3__MultiplexOperationsFromGeneratorImpl____body({ i64, i64, %Callable* }* %unitaryGenerator, %Array* %auxiliary, { %Array* }* %index, %Array* %target) { +entry: + %0 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %unitaryGenerator, i32 0, i32 2 + %unitaryFunction = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %1 = bitcast { i64, i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 1) + %2 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %4 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %nIndex = call i64 @__quantum__rt__array_get_size_1d(%Array* %3) + %5 = trunc i64 %nIndex to i32 + %6 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %5) + %nStates = fptosi double %6 
to i64 + %7 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %nUnitaries = load i64, i64* %7, align 4 + %8 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %unitaryOffset = load i64, i64* %8, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %9 = sdiv i64 %nStates, 2 + %nUnitariesLeft = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %nUnitaries, i64 %9) + %nUnitariesRight = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %nUnitaries, i64 %nStates) + call void @__quantum__rt__capture_update_reference_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %unitaryFunction, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64, %Callable* }* getelementptr ({ i64, i64, %Callable* }, { i64, i64, %Callable* }* null, i32 1) to i64)) + %leftUnitaries = bitcast %Tuple* %10 to { i64, i64, %Callable* }* + %11 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %leftUnitaries, i32 0, i32 0 + %12 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %leftUnitaries, i32 0, i32 1 + %13 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %leftUnitaries, i32 0, i32 2 + store i64 %nUnitariesLeft, i64* %11, align 4 + store i64 %unitaryOffset, i64* %12, align 4 + store %Callable* %unitaryFunction, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + %14 = sub i64 %nUnitariesRight, %nUnitariesLeft + %15 = add i64 %unitaryOffset, %nUnitariesLeft + call void @__quantum__rt__capture_update_reference_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %unitaryFunction, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64, %Callable* }* getelementptr ({ i64, i64, %Callable* }, { i64, i64, %Callable* }* null, i32 1) to i64)) + %rightUnitaries = bitcast %Tuple* %16 to { i64, i64, %Callable* }* + %17 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %rightUnitaries, i32 0, i32 0 + %18 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %rightUnitaries, i32 0, i32 1 + %19 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %rightUnitaries, i32 0, i32 2 + store i64 %14, i64* %17, align 4 + store i64 %15, i64* %18, align 4 + store %Callable* %unitaryFunction, %Callable** %19, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + %20 = call %Array* @Microsoft__Quantum__Arrays___11e60deb067d435786055b3275e916c8_Most__body(%Array* %3) + %newControls = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %20) + %21 = getelementptr inbounds { %Array* }, { %Array* }* %newControls, i32 0, i32 0 + %22 = load %Array*, %Array** %21, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %22, i32 1) + %23 = bitcast { %Array* }* %newControls to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %23, i32 1) + %24 = icmp sgt i64 %nUnitaries, 0 + br i1 %24, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %25 = call i64 @__quantum__rt__array_get_size_1d(%Array* %auxiliary) + %26 = icmp eq i64 %25, 1 + br i1 %26, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %then0__1 + %27 = icmp eq i64 %nIndex, 0 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %then0__1 + %28 = phi i1 [ %27, %condTrue__1 ], [ %26, %then0__1 ] + br i1 %28, label %then0__2, label %test1__1 + +then0__2: ; preds = %condContinue__1 + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %30 = bitcast %Tuple* %29 to { i64 }* + %31 = getelementptr inbounds { i64 }, { i64 }* %30, i32 0, i32 0 + store i64 %unitaryOffset, i64* %31, align 4 + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %unitaryFunction, %Tuple* %29, %Tuple* %32) + %33 = bitcast %Tuple* %32 to { %Callable* }* + %34 = getelementptr inbounds { %Callable* }, { %Callable* }* %33, i32 0, i32 0 + %35 = load %Callable*, %Callable** %34, align 8 + %36 = call %Callable* @__quantum__rt__callable_copy(%Callable* %35, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %36) + call void @__quantum__rt__callable_make_controlled(%Callable* %36) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %37 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %38 = bitcast %Tuple* %37 to { %Array*, %Array* }* + %39 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %38, i32 0, i32 0 + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %38, i32 0, i32 1 + store %Array* %auxiliary, %Array** %39, align 8 + store %Array* %target, %Array** %40, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %36, %Tuple* %37, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1) + br label %continue__2 + +test1__1: ; preds = %condContinue__1 + %41 = call i64 @__quantum__rt__array_get_size_1d(%Array* %auxiliary) + %42 = icmp eq i64 %41, 0 + br i1 %42, label %condTrue__2, label %condContinue__2 + +condTrue__2: ; preds = %test1__1 + %43 = icmp sge i64 %nIndex, 1 + br label 
%condContinue__2 + +condContinue__2: ; preds = %condTrue__2, %test1__1 + %44 = phi i1 [ %43, %condTrue__2 ], [ %42, %test1__1 ] + br i1 %44, label %then1__1, label %else__1 + +then1__1: ; preds = %condContinue__2 + %newauxiliary = call %Qubit* @Microsoft__Quantum__Arrays___9d7834b9a7ce4d8cb1a6bd58bdfd2736_Tail__body(%Array* %3) + %45 = icmp sgt i64 %nUnitariesRight, 0 + br i1 %45, label %then0__3, label %continue__3 + +then0__3: ; preds = %then1__1 + %46 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %46, i64 0) + %48 = bitcast i8* %47 to %Qubit** + store %Qubit* %newauxiliary, %Qubit** %48, align 8 + call void @Microsoft__Quantum__Canon___c676ff1e4c9940fe9ce03435c793589a___QsRef3__MultiplexOperationsFromGeneratorImpl____body({ i64, i64, %Callable* }* %rightUnitaries, %Array* %46, { %Array* }* %newControls, %Array* %target) + call void @__quantum__rt__array_update_reference_count(%Array* %46, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %then1__1 + call void @__quantum__qis__x__body(%Qubit* %newauxiliary) + %49 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 0) + %51 = bitcast i8* %50 to %Qubit** + store %Qubit* %newauxiliary, %Qubit** %51, align 8 + call void @Microsoft__Quantum__Canon___c676ff1e4c9940fe9ce03435c793589a___QsRef3__MultiplexOperationsFromGeneratorImpl____body({ i64, i64, %Callable* }* %leftUnitaries, %Array* %49, { %Array* }* %newControls, %Array* %target) + call void @__quantum__qis__x__body(%Qubit* %newauxiliary) + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + br label %continue__2 + +else__1: ; preds = %condContinue__2 + %52 = call %Qubit* @Microsoft__Quantum__Arrays___9d7834b9a7ce4d8cb1a6bd58bdfd2736_Tail__body(%Array* %3) + %53 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 0) + %55 = bitcast i8* %54 to %Qubit** + store %Qubit* %52, %Qubit** %55, align 8 + %controls = call %Array* @__quantum__rt__array_concatenate(%Array* %53, %Array* %auxiliary) + call void @__quantum__rt__array_update_reference_count(%Array* %controls, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 1) + %newauxiliary__1 = call %Qubit* @__quantum__rt__qubit_allocate() + %56 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controls) + %57 = sub i64 %56, 2 + %58 = call i64 @Microsoft__Quantum__Math__MaxI__body(i64 0, i64 %57) + %andauxiliary = call %Array* @__quantum__rt__qubit_allocate_array(i64 %58) + call void @__quantum__rt__array_update_alias_count(%Array* %andauxiliary, i32 1) + call void @Microsoft__Quantum__Canon____QsRef3__ApplyAndChain____body(%Array* %andauxiliary, %Array* %controls, %Qubit* %newauxiliary__1) + %59 = icmp sgt i64 %nUnitariesRight, 0 + br i1 %59, label %then0__4, label %continue__4 + +then0__4: ; preds = %else__1 + %60 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 0) + %62 = bitcast i8* %61 to %Qubit** + store %Qubit* %newauxiliary__1, %Qubit** %62, align 8 + call void @Microsoft__Quantum__Canon___c676ff1e4c9940fe9ce03435c793589a___QsRef3__MultiplexOperationsFromGeneratorImpl____body({ i64, i64, %Callable* }* %rightUnitaries, %Array* %60, { %Array* }* %newControls, %Array* %target) + call void 
@__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + br label %continue__4 + +continue__4: ; preds = %then0__4, %else__1 + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 1) + call void @__quantum__qis__x__ctl(%Array* %auxiliary, %Qubit* %newauxiliary__1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 -1) + %63 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %63, i64 0) + %65 = bitcast i8* %64 to %Qubit** + store %Qubit* %newauxiliary__1, %Qubit** %65, align 8 + call void @Microsoft__Quantum__Canon___c676ff1e4c9940fe9ce03435c793589a___QsRef3__MultiplexOperationsFromGeneratorImpl____body({ i64, i64, %Callable* }* %leftUnitaries, %Array* %63, { %Array* }* %newControls, %Array* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 1) + call void @__quantum__qis__x__ctl(%Array* %auxiliary, %Qubit* %newauxiliary__1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 -1) + call void @Microsoft__Quantum__Canon____QsRef3__ApplyAndChain____adj(%Array* %andauxiliary, %Array* %controls, %Qubit* %newauxiliary__1) + call void @__quantum__rt__array_update_alias_count(%Array* %andauxiliary, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %63, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %andauxiliary) + call void @__quantum__rt__qubit_release(%Qubit* %newauxiliary__1) + call void @__quantum__rt__array_update_alias_count(%Array* %controls, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controls, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controls, i32 -1) + br label %continue__2 + +continue__2: ; preds = %continue__4, %continue__3, %then0__2 + br label %continue__1 + +continue__1: ; preds = %continue__2, %entry + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %22, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %unitaryFunction, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %22, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___c676ff1e4c9940fe9ce03435c793589a___QsRef3__MultiplexOperationsFromGeneratorImpl____ctladj(%Array* %controlRegister, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %unitaryGenerator = load { i64, i64, %Callable* }*, { i64, i64, %Callable* }** %1, align 8 + %2 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %unitaryGenerator, i32 0, i32 2 + %3 = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 1) + %4 = bitcast { i64, i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %auxiliary = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 1) + %6 = getelementptr inbounds { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %index = load { %Array* }*, { %Array* }** %6, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %9 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %0, i32 0, i32 3 + %target = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %11 = call %Array* @__quantum__rt__array_concatenate(%Array* %auxiliary, %Array* %controlRegister) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 1) + call void @Microsoft__Quantum__Canon___c676ff1e4c9940fe9ce03435c793589a___QsRef3__MultiplexOperationsFromGeneratorImpl____adj({ i64, i64, %Callable* }* %unitaryGenerator, %Array* %11, { %Array* }* %index, %Array* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 -1) + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___c676ff1e4c9940fe9ce03435c793589a___QsRef3__MultiplexOperationsFromGeneratorImpl____ctl(%Array* %controlRegister, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %unitaryGenerator = load { i64, i64, %Callable* }*, { i64, i64, %Callable* }** %1, align 8 + %2 = getelementptr inbounds { i64, i64, %Callable* }, { i64, i64, %Callable* }* %unitaryGenerator, i32 0, i32 2 + %3 = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 1) + %4 = bitcast { i64, i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %auxiliary = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 1) + %6 = getelementptr inbounds { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %index = load { %Array* }*, { %Array* }** %6, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %9 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }, { { i64, i64, %Callable* }*, %Array*, { %Array* }*, %Array* }* %0, i32 0, i32 3 + %target = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %11 = call %Array* @__quantum__rt__array_concatenate(%Array* %auxiliary, %Array* %controlRegister) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 1) + call void @Microsoft__Quantum__Canon___c676ff1e4c9940fe9ce03435c793589a___QsRef3__MultiplexOperationsFromGeneratorImpl____body({ i64, i64, %Callable* }* %unitaryGenerator, %Array* %11, { %Array* }* %index, %Array* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__40__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 1 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Qubit* }* getelementptr ({ i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable*, %Array*, %Qubit* }* + %12 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 3 + store i64 %2, i64* %12, align 4 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__40__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 1 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Qubit* }* getelementptr ({ i64, %Callable*, %Array*, 
%Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable*, %Array*, %Qubit* }* + %12 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 3 + store i64 %2, i64* %12, align 4 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__40__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Qubit* }*, { %Array*, %Qubit* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Qubit* }* getelementptr ({ i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64, %Callable*, %Array*, %Qubit* }* + %16 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, 
%Qubit* }* %15, i32 0, i32 3 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Qubit* %13, %Qubit** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* getelementptr ({ %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* + %22 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { i64, %Callable*, %Array*, %Qubit* }* %15, { i64, %Callable*, %Array*, %Qubit* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__40__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Qubit* }*, { %Array*, %Qubit* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Qubit* }* getelementptr ({ i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64, %Callable*, %Array*, %Qubit* }* + %16 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, 
%Callable*, %Array*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 3 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Qubit* %13, %Qubit** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* getelementptr ({ %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* + %22 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { i64, %Callable*, %Array*, %Qubit* }* %15, { i64, %Callable*, %Array*, %Qubit* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___6234a07443684162b5952eda90654bb6_ApplyControlledOnInt__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable*, %Array*, %Qubit* }* + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon___6234a07443684162b5952eda90654bb6_ApplyControlledOnInt__body(i64 %5, %Callable* %6, %Array* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___6234a07443684162b5952eda90654bb6_ApplyControlledOnInt__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable*, %Array*, %Qubit* }* + %1 = 
getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon___6234a07443684162b5952eda90654bb6_ApplyControlledOnInt__adj(i64 %5, %Callable* %6, %Array* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___6234a07443684162b5952eda90654bb6_ApplyControlledOnInt__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, %Callable*, %Array*, %Qubit* }*, { i64, %Callable*, %Array*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon___6234a07443684162b5952eda90654bb6_ApplyControlledOnInt__ctl(%Array* %3, { i64, %Callable*, %Array*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___6234a07443684162b5952eda90654bb6_ApplyControlledOnInt__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, %Callable*, %Array*, %Qubit* }*, { i64, %Callable*, %Array*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon___6234a07443684162b5952eda90654bb6_ApplyControlledOnInt__ctladj(%Array* %3, { i64, %Callable*, %Array*, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__17__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__17__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___6234a07443684162b5952eda90654bb6_ApplyControlledOnInt__body(i64 %numberState, %Callable* %oracle, %Array* %controlRegister, %Qubit* %targetRegister) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %bits = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %0) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %1 = call %Callable* @Microsoft__Quantum__Canon___2ded2a97dfe54a5aa67d18473d4668cb_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %3 = bitcast %Tuple* %2 to { %Array*, %Qubit* }* + %4 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %3, i32 0, i32 0 + %5 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %3, i32 0, i32 1 + store %Array* %controlRegister, %Array** %4, align 8 + store %Qubit* %targetRegister, %Qubit** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %1, %Tuple* %2, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___6234a07443684162b5952eda90654bb6_ApplyControlledOnInt__adj(i64 %numberState, %Callable* %oracle, %Array* %controlRegister, 
%Qubit* %targetRegister) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %__qsVar0__bits__ = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %0) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 1) + %1 = call %Callable* @Microsoft__Quantum__Canon___2ded2a97dfe54a5aa67d18473d4668cb_ControlledOnBitString__body(%Array* %__qsVar0__bits__, %Callable* %oracle) + %2 = call %Callable* @__quantum__rt__callable_copy(%Callable* %1, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %2) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, %Qubit* }* + %5 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + store %Array* %controlRegister, %Array** %5, align 8 + store %Qubit* %targetRegister, %Qubit** %6, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %2, %Tuple* %3, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___6234a07443684162b5952eda90654bb6_ApplyControlledOnInt__ctl(%Array* %__controlQubits__, { i64, %Callable*, %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %numberState = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call 
void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %targetRegister = load %Qubit*, %Qubit** %4, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %bits = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %5) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %6 = call %Callable* @Microsoft__Quantum__Canon___2ded2a97dfe54a5aa67d18473d4668cb_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %6, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Qubit* }* + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Qubit* %targetRegister, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %14, align 8 + store { %Array*, %Qubit* }* %9, { %Array*, %Qubit* }** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Canon___6234a07443684162b5952eda90654bb6_ApplyControlledOnInt__ctladj(%Array* %__controlQubits__, { i64, %Callable*, %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %numberState = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %targetRegister = load %Qubit*, %Qubit** %4, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %__qsVar0__bits__ = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 1) + %6 = call %Callable* @Microsoft__Quantum__Canon___2ded2a97dfe54a5aa67d18473d4668cb_ControlledOnBitString__body(%Array* %__qsVar0__bits__, %Callable* %oracle) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %6, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %7) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Qubit* }* + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Qubit* %targetRegister, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %14, align 8 + store { %Array*, %Qubit* }* %9, { %Array*, %Qubit* }** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call 
void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___91404c39c82d4109956a843f5ebe997e_ControlledOnInt__body(i64 %numberState, %Callable* %oracle) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___f7b44f7729b14fef9eb4e6dc89a14654_ApplyControlledOnInt__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Callable* }* getelementptr ({ %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, i64, %Callable* }* + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store i64 %numberState, i64* %4, align 4 + store %Callable* %oracle, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__41__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__18__FunctionTable, %Tuple* %1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + ret %Callable* %6 +} + +define internal void @Lifted__PartialApplication__41__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %6 = getelementptr inbounds { %Array*, %Array* 
}, { %Array*, %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 1 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Array* }* getelementptr ({ i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable*, %Array*, %Array* }* + %12 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 3 + store i64 %2, i64* %12, align 4 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__41__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 1 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Array* }* getelementptr ({ i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable*, %Array*, %Array* }* + %12 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 3 + store i64 %2, i64* %12, align 4 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* 
@__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__41__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Array* }* getelementptr ({ i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64, %Callable*, %Array*, %Array* }* + %16 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 3 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Callable*, %Array*, %Array* }* }* getelementptr ({ %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { i64, %Callable*, %Array*, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { i64, %Callable*, %Array*, %Array* }* %15, { i64, %Callable*, %Array*, %Array* }** %23, align 8 + %24 
= getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__41__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Array* }* getelementptr ({ i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64, %Callable*, %Array*, %Array* }* + %16 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 3 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Callable*, %Array*, %Array* }* }* getelementptr ({ %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { i64, %Callable*, %Array*, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, 
{ i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { i64, %Callable*, %Array*, %Array* }* %15, { i64, %Callable*, %Array*, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f7b44f7729b14fef9eb4e6dc89a14654_ApplyControlledOnInt__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___f7b44f7729b14fef9eb4e6dc89a14654_ApplyControlledOnInt__body(i64 %5, %Callable* %6, %Array* %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f7b44f7729b14fef9eb4e6dc89a14654_ApplyControlledOnInt__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___f7b44f7729b14fef9eb4e6dc89a14654_ApplyControlledOnInt__adj(i64 %5, %Callable* %6, %Array* %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f7b44f7729b14fef9eb4e6dc89a14654_ApplyControlledOnInt__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { 
+entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, %Callable*, %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, %Callable*, %Array*, %Array* }*, { i64, %Callable*, %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___f7b44f7729b14fef9eb4e6dc89a14654_ApplyControlledOnInt__ctl(%Array* %3, { i64, %Callable*, %Array*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f7b44f7729b14fef9eb4e6dc89a14654_ApplyControlledOnInt__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, %Callable*, %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, %Callable*, %Array*, %Array* }*, { i64, %Callable*, %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___f7b44f7729b14fef9eb4e6dc89a14654_ApplyControlledOnInt__ctladj(%Array* %3, { i64, %Callable*, %Array*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__18__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__18__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* 
%capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f7b44f7729b14fef9eb4e6dc89a14654_ApplyControlledOnInt__body(i64 %numberState, %Callable* %oracle, %Array* %controlRegister, %Array* %targetRegister) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %bits = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %0) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %1 = call %Callable* @Microsoft__Quantum__Canon___c51e836cb2f9442f98db95ade07495ac_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %3 = bitcast %Tuple* %2 to { %Array*, %Array* }* + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + store %Array* %controlRegister, %Array** %4, align 8 + store %Array* %targetRegister, %Array** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %1, %Tuple* %2, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f7b44f7729b14fef9eb4e6dc89a14654_ApplyControlledOnInt__adj(i64 %numberState, %Callable* %oracle, %Array* %controlRegister, %Array* %targetRegister) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %__qsVar0__bits__ = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %0) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 1) + %1 = call %Callable* 
@Microsoft__Quantum__Canon___c51e836cb2f9442f98db95ade07495ac_ControlledOnBitString__body(%Array* %__qsVar0__bits__, %Callable* %oracle) + %2 = call %Callable* @__quantum__rt__callable_copy(%Callable* %1, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %2) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, %Array* }* + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + store %Array* %controlRegister, %Array** %5, align 8 + store %Array* %targetRegister, %Array** %6, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %2, %Tuple* %3, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f7b44f7729b14fef9eb4e6dc89a14654_ApplyControlledOnInt__ctl(%Array* %__controlQubits__, { i64, %Callable*, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %numberState = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %targetRegister = load %Array*, %Array** %4, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %bits = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %5) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %6 = call %Callable* @Microsoft__Quantum__Canon___c51e836cb2f9442f98db95ade07495ac_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %6, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Array* %targetRegister, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 
-1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f7b44f7729b14fef9eb4e6dc89a14654_ApplyControlledOnInt__ctladj(%Array* %__controlQubits__, { i64, %Callable*, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %numberState = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %targetRegister = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %__qsVar0__bits__ = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 1) + %6 = call %Callable* @Microsoft__Quantum__Canon___c51e836cb2f9442f98db95ade07495ac_ControlledOnBitString__body(%Array* %__qsVar0__bits__, %Callable* %oracle) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %6, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %7) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Array* %targetRegister, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + call void 
@__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8ecd591e14354dd1acb17a9a79b0fc4a___QsRef3__ApplyBoundCA____body(%Array* %operations, %Array* %target) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %8 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %9 = phi i64 [ 0, %exit__1 ], [ %16, %exiting__2 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %9) + %12 = bitcast i8* %11 to %Callable** + %op = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Array* }* + %15 = getelementptr inbounds { %Array* }, { %Array* }* %14, i32 0, i32 0 + store %Array* %target, %Array** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %13, %Tuple* null) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %16 = add i64 %9, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %17 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %18 = phi i64 [ 0, %exit__2 ], [ %23, %exiting__3 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %18) + %21 = bitcast i8* %20 to %Callable** + %22 = load %Callable*, %Callable** %21, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %22, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %23 = add i64 %18, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8ecd591e14354dd1acb17a9a79b0fc4a___QsRef3__ApplyBoundCA____adj(%Array* %operations, %Array* %target) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %8 = sub i64 %0, 1 + %9 = insertvalue %Range zeroinitializer, i64 %8, 0 + %10 = insertvalue %Range %9, i64 -1, 1 + %11 = insertvalue %Range %10, i64 0, 2 + %12 = call %Array* @__quantum__rt__array_slice_1d(%Array* %operations, %Range %11, i1 true) + %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %12) + %14 = sub i64 %13, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %23, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %15) + %18 = bitcast i8* %17 to %Callable** + %__qsVar0__op__ = load %Callable*, %Callable** %18, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %19 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %19) + %20 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array* }* + %22 = getelementptr inbounds { %Array* }, { %Array* }* %21, i32 0, i32 0 + store %Array* %target, %Array** %22, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %19, %Tuple* %20, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %23 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %24 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %25 = phi i64 [ 0, %exit__2 ], [ %30, %exiting__3 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %25) + %28 = bitcast i8* %27 to %Callable** + %29 = load %Callable*, %Callable** %28, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %29, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %30 = add i64 %25, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8ecd591e14354dd1acb17a9a79b0fc4a___QsRef3__ApplyBoundCA____ctl(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %operations = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %4) + %7 = bitcast i8* %6 to %Callable** + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %11 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ] + %13 = icmp sle i64 
%12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %12) + %15 = bitcast i8* %14 to %Callable** + %op = load %Callable*, %Callable** %15, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %16 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %16, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %16) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, %Array* }* + %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %18, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %19, align 8 + store %Array* %target, %Array** %20, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %16, %Tuple* %17, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %16, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %22 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %23 = phi i64 [ 0, %exit__2 ], [ %28, %exiting__3 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %23) + %26 = bitcast i8* %25 to %Callable** + %27 = load %Callable*, %Callable** %26, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %27, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %28 = add i64 %23, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8ecd591e14354dd1acb17a9a79b0fc4a___QsRef3__ApplyBoundCA____ctladj(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %operations = load %Array*, %Array** %1, align 8 + %2 = call i64 
@__quantum__rt__array_get_size_1d(%Array* %operations) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %4) + %7 = bitcast i8* %6 to %Callable** + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %11 = sub i64 %2, 1 + %12 = insertvalue %Range zeroinitializer, i64 %11, 0 + %13 = insertvalue %Range %12, i64 -1, 1 + %14 = insertvalue %Range %13, i64 0, 2 + %15 = call %Array* @__quantum__rt__array_slice_1d(%Array* %operations, %Range %14, i1 true) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %15) + %17 = sub i64 %16, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %18 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %18) + %21 = bitcast i8* %20 to %Callable** + %__qsVar0__op__ = load %Callable*, %Callable** %21, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, %Array* }* + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %25, align 8 + store %Array* %target, %Array** %26, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %23, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %18, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %28 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %34, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %29) + %32 = bitcast i8* %31 to %Callable** + %33 = load %Callable*, %Callable** %32, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %33, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %34 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___2ded2a97dfe54a5aa67d18473d4668cb_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___bb0a812d2b574442bf8d9673cc2bf3be_ApplyControlledOnBitString__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Callable* }* getelementptr ({ %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Array*, %Callable* }* + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store %Array* %bits, %Array** %4, align 8 + store %Callable* %oracle, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__43__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__20__FunctionTable, %Tuple* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + ret %Callable* %6 +} + +define internal %Callable* 
@Microsoft__Quantum__Canon___c51e836cb2f9442f98db95ade07495ac_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___868340d48aba4d3d9872cb8fd11efca4_ApplyControlledOnBitString__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Callable* }* getelementptr ({ %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Array*, %Callable* }* + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store %Array* %bits, %Array** %4, align 8 + store %Callable* %oracle, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__44__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__21__FunctionTable, %Tuple* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + ret %Callable* %6 +} + +define internal %Callable* @Microsoft__Quantum__Canon___6a714b914d4d48de8a4ad7810cc0d5d1_CurriedOpCA__body(%Callable* %op) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___6d6f9db43d6b47c4a0fad10624f517d9___QsRef3__WithFirstInputAppliedCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable* }, { %Callable*, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Callable* }* + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %2, i32 0, i32 1 + store %Callable* %0, %Callable** %3, align 8 + store %Callable* %op, %Callable** %4, align 8 + %5 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__42__FunctionTable, [2 x void (%Tuple*, 
i32)*]* @MemoryManagement__19__FunctionTable, %Tuple* %1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret %Callable* %5 +} + +define internal void @Lifted__PartialApplication__42__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Callable*, %Array* }* + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %7, i32 0, i32 1 + store %Callable* %2, %Callable** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___6d6f9db43d6b47c4a0fad10624f517d9___QsRef3__WithFirstInputAppliedCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %3 = load %Callable*, %Callable** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = call %Callable* @Microsoft__Quantum__Canon___6d6f9db43d6b47c4a0fad10624f517d9___QsRef3__WithFirstInputAppliedCA____body(%Callable* %3, %Array* %4) + %6 = bitcast %Tuple* %result-tuple to { %Callable* }* + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + store %Callable* %5, %Callable** %7, align 8 + ret void +} + +define internal void @MemoryManagement__19__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void 
@MemoryManagement__19__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___6d6f9db43d6b47c4a0fad10624f517d9___QsRef3__WithFirstInputAppliedCA____body(%Callable* %op, %Array* %arg1) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %arg1, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %arg1, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable*, %Array* }* + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %1, i32 0, i32 1 + store %Callable* %op, %Callable** %2, align 8 + store %Array* %arg1, %Array** %3, align 8 + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__45__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__22__FunctionTable, %Tuple* %0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %arg1, i32 -1) + ret %Callable* %4 +} + +define internal void @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____body({ i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = sub i64 %nSteps, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %4 = icmp sle i64 %idx, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = fmul double %stepSize, 5.000000e-01 + call 
void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { i64, double, %Array* }* + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %7, i32 0, i32 1 + %10 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %7, i32 0, i32 2 + store i64 %idx, i64* %8, align 4 + store double %5, double* %9, align 8 + store %Array* %target, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %6, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %12 = sub i64 %nSteps, 1 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idx__1 = phi i64 [ %12, %preheader__1 ], [ %22, %exiting__2 ] + %13 = icmp sle i64 %idx__1, 0 + %14 = icmp sge i64 %idx__1, 0 + %15 = select i1 false, i1 %13, i1 %14 + br i1 %15, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %16 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { i64, double, %Array* }* + %19 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %18, i32 0, i32 1 + %21 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %18, i32 0, i32 2 + store i64 %idx__1, i64* %19, align 4 + store double %16, double* %20, align 8 + store %Array* %target, %Array** %21, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %17, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %idx__1, -1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____adj({ i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + 
call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = sub i64 %nSteps, 1 + %4 = sub i64 0, %3 + %5 = sdiv i64 %4, -1 + %6 = mul i64 -1, %5 + %7 = add i64 %3, %6 + %8 = insertvalue %Range zeroinitializer, i64 %7, 0 + %9 = insertvalue %Range %8, i64 1, 1 + %10 = insertvalue %Range %9, i64 %3, 2 + %11 = extractvalue %Range %10, 0 + %12 = extractvalue %Range %10, 1 + %13 = extractvalue %Range %10, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %14 = icmp sgt i64 %12, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar1__idx__ = phi i64 [ %11, %preheader__1 ], [ %25, %exiting__1 ] + %15 = icmp sle i64 %__qsVar1__idx__, %13 + %16 = icmp sge i64 %__qsVar1__idx__, %13 + %17 = select i1 %14, i1 %15, i1 %16 + br i1 %17, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + %19 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { i64, double, %Array* }* + %22 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %21, i32 0, i32 2 + store i64 %__qsVar1__idx__, i64* %22, align 4 + store double %19, double* %23, align 8 + store %Array* %target, %Array** %24, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %20, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %25 = add i64 %__qsVar1__idx__, %12 + br label %header__1 + +exit__1: ; preds = %header__1 + %26 = sub i64 %nSteps, 1 + %27 = sub i64 %26, 0 + %28 = sdiv i64 %27, 1 + %29 = mul i64 1, %28 + %30 = add i64 0, %29 + %31 = insertvalue %Range zeroinitializer, i64 %30, 0 + %32 = insertvalue %Range %31, i64 -1, 1 + %33 = insertvalue %Range %32, i64 0, 2 + %34 = extractvalue %Range %33, 0 + %35 = extractvalue %Range %33, 1 + %36 = extractvalue %Range %33, 2 + br label %preheader__2 + +preheader__2: ; preds = %exit__1 + %37 = icmp sgt i64 %35, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__2 + %__qsVar0__idx__ = phi i64 [ %34, %preheader__2 ], [ %48, %exiting__2 ] + %38 = icmp sle i64 %__qsVar0__idx__, %36 + %39 = icmp sge i64 %__qsVar0__idx__, %36 + %40 = select i1 %37, i1 %38, i1 %39 + br i1 %40, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %41 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %41, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %41) + %42 = fmul double %stepSize, 5.000000e-01 + call void 
@__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %43 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %44 = bitcast %Tuple* %43 to { i64, double, %Array* }* + %45 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %44, i32 0, i32 0 + %46 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %44, i32 0, i32 1 + %47 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %44, i32 0, i32 2 + store i64 %__qsVar0__idx__, i64* %45, align 4 + store double %42, double* %46, align 8 + store %Array* %target, %Array** %47, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %41, %Tuple* %43, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %41, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %41, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %48 = add i64 %__qsVar0__idx__, %35 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %3, align 8 + %4 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 0 + %nSteps = load i64, i64* %5, align 4 + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 1 + %op = load %Callable*, %Callable** %6, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %7 = sub i64 %nSteps, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] + %8 = icmp sle i64 %idx, %7 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %9) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %10 = fmul double %stepSize, 5.000000e-01 + call void 
@__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i64, double, %Array* }* + %13 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %12, i32 0, i32 2 + store i64 %idx, i64* %13, align 4 + store double %10, double* %14, align 8 + store %Array* %target, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { i64, double, %Array* }* }* + %18 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { i64, double, %Array* }* %12, { i64, double, %Array* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %16, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %21 = sub i64 %nSteps, 1 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idx__1 = phi i64 [ %21, %preheader__1 ], [ %36, %exiting__2 ] + %22 = icmp sle i64 %idx__1, 0 + %23 = icmp sge i64 %idx__1, 0 + %24 = select i1 false, i1 %22, i1 %23 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %25, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %25) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %26 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { i64, double, %Array* }* + %29 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %28, i32 0, i32 1 + %31 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %28, i32 0, i32 2 + store i64 %idx__1, 
i64* %29, align 4 + store double %26, double* %30, align 8 + store %Array* %target, %Array** %31, align 8 + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { %Array*, { i64, double, %Array* }* }* + %34 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %33, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %34, align 8 + store { i64, double, %Array* }* %28, { i64, double, %Array* }** %35, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %25, %Tuple* %32, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %25, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %25, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %36 = add i64 %idx__1, -1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %3, align 8 + %4 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 0 + %nSteps = load i64, i64* %5, align 4 + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 1 + %op = load %Callable*, %Callable** %6, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %7 = sub i64 %nSteps, 1 + %8 = sub i64 0, %7 + %9 = sdiv i64 %8, -1 + %10 = mul i64 -1, %9 + %11 = add i64 %7, %10 + %12 = insertvalue %Range zeroinitializer, i64 %11, 0 + %13 = insertvalue %Range %12, i64 1, 1 + %14 = insertvalue %Range %13, i64 %7, 2 + %15 = extractvalue %Range %14, 0 + %16 = extractvalue %Range %14, 1 + %17 = 
extractvalue %Range %14, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %18 = icmp sgt i64 %16, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar1__idx__ = phi i64 [ %15, %preheader__1 ], [ %33, %exiting__1 ] + %19 = icmp sle i64 %__qsVar1__idx__, %17 + %20 = icmp sge i64 %__qsVar1__idx__, %17 + %21 = select i1 %18, i1 %19, i1 %20 + br i1 %21, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %23 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { i64, double, %Array* }* + %26 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %25, i32 0, i32 1 + %28 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %25, i32 0, i32 2 + store i64 %__qsVar1__idx__, i64* %26, align 4 + store double %23, double* %27, align 8 + store %Array* %target, %Array** %28, align 8 + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %30 = bitcast %Tuple* %29 to { %Array*, { i64, double, %Array* }* }* + %31 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %30, i32 0, i32 0 + %32 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %30, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %31, align 8 + store { i64, double, %Array* }* %25, { i64, double, %Array* }** %32, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %29, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %33 = add i64 %__qsVar1__idx__, %16 + br label %header__1 + +exit__1: ; preds = %header__1 + %34 = sub i64 %nSteps, 1 + %35 = sub i64 %34, 0 + %36 = sdiv i64 %35, 1 + %37 = mul i64 1, %36 + %38 = add i64 0, %37 + %39 = insertvalue %Range zeroinitializer, i64 %38, 0 + %40 = insertvalue %Range %39, i64 -1, 1 + %41 = insertvalue %Range %40, i64 0, 2 + %42 = extractvalue %Range %41, 0 + %43 = extractvalue %Range %41, 1 + %44 = extractvalue %Range %41, 2 + br label %preheader__2 + +preheader__2: ; preds = %exit__1 + %45 = icmp sgt i64 %43, 0 + br label %header__2 + 
+header__2: ; preds = %exiting__2, %preheader__2 + %__qsVar0__idx__ = phi i64 [ %42, %preheader__2 ], [ %60, %exiting__2 ] + %46 = icmp sle i64 %__qsVar0__idx__, %44 + %47 = icmp sge i64 %__qsVar0__idx__, %44 + %48 = select i1 %45, i1 %46, i1 %47 + br i1 %48, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %49 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %49, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %49) + call void @__quantum__rt__callable_make_controlled(%Callable* %49) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %50 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %51 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %52 = bitcast %Tuple* %51 to { i64, double, %Array* }* + %53 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %52, i32 0, i32 0 + %54 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %52, i32 0, i32 1 + %55 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %52, i32 0, i32 2 + store i64 %__qsVar0__idx__, i64* %53, align 4 + store double %50, double* %54, align 8 + store %Array* %target, %Array** %55, align 8 + %56 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %57 = bitcast %Tuple* %56 to { %Array*, { i64, double, %Array* }* }* + %58 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %57, i32 0, i32 0 + %59 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %57, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %58, align 8 + store { i64, double, %Array* }* %52, { i64, double, %Array* }** %59, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %49, %Tuple* %56, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %49, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %49, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %51, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %60 = add i64 %__qsVar0__idx__, %43 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal %Range @Microsoft__Quantum__Arrays___be8c93aed1174ddf9a1dc1ba0169742c_IndexRange__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, 
%entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { %Qubit*, %Qubit*, %Qubit* }** + %6 = load { %Qubit*, %Qubit*, %Qubit* }*, { %Qubit*, %Qubit*, %Qubit* }** %5, align 8 + %7 = bitcast { %Qubit*, %Qubit*, %Qubit* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %9 = sub i64 %0, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %11 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %12) + %15 = bitcast i8* %14 to { %Qubit*, %Qubit*, %Qubit* }** + %16 = load { %Qubit*, %Qubit*, %Qubit* }*, { %Qubit*, %Qubit*, %Qubit* }** %15, align 8 + %17 = bitcast { %Qubit*, %Qubit*, %Qubit* }* %16 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %18 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %10 +} + +define internal %Range @Microsoft__Quantum__Arrays___a8e1efd738ea4e35a379366d158d6002_IndexRange__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %2 +} + +define internal %Range @Microsoft__Quantum__Arrays___efb0462d99e445f898c5269230c6127d_IndexRange__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { i2, %Qubit* }** + %6 = load { i2, %Qubit* }*, { i2, %Qubit* }** %5, align 8 + %7 = bitcast { i2, %Qubit* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %9 = sub i64 %0, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %11 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %12) + %15 = bitcast i8* %14 to { i2, %Qubit* }** + %16 = load { i2, %Qubit* }*, 
{ i2, %Qubit* }** %15, align 8 + %17 = bitcast { i2, %Qubit* }* %16 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %18 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %10 +} + +define internal void @Lifted__PartialApplication__43__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 1 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Qubit* }* getelementptr ({ %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Callable*, %Array*, %Qubit* }* + %12 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 3 + store %Array* %2, %Array** %12, align 8 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__43__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 1 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Qubit* }* getelementptr ({ %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Callable*, %Array*, %Qubit* }* + %12 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 3 + store %Array* %2, %Array** %12, align 8 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__43__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Qubit* }*, { %Array*, %Qubit* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Qubit* }* getelementptr ({ %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Callable*, %Array*, %Qubit* }* + %16 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, 
%Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 3 + store %Array* %7, %Array** %16, align 8 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Qubit* %13, %Qubit** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* + %22 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { %Array*, %Callable*, %Array*, %Qubit* }* %15, { %Array*, %Callable*, %Array*, %Qubit* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__43__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Qubit* }*, { %Array*, %Qubit* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Qubit* }* getelementptr ({ %Array*, %Callable*, 
%Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Callable*, %Array*, %Qubit* }* + %16 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 3 + store %Array* %7, %Array** %16, align 8 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Qubit* %13, %Qubit** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* + %22 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { %Array*, %Callable*, %Array*, %Qubit* }* %15, { %Array*, %Callable*, %Array*, %Qubit* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___bb0a812d2b574442bf8d9673cc2bf3be_ApplyControlledOnBitString__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Callable*, %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %5 = load %Array*, %Array** %1, align 8 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** 
%3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon___bb0a812d2b574442bf8d9673cc2bf3be_ApplyControlledOnBitString__body(%Array* %5, %Callable* %6, %Array* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___bb0a812d2b574442bf8d9673cc2bf3be_ApplyControlledOnBitString__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Callable*, %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %5 = load %Array*, %Array** %1, align 8 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon___bb0a812d2b574442bf8d9673cc2bf3be_ApplyControlledOnBitString__adj(%Array* %5, %Callable* %6, %Array* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___bb0a812d2b574442bf8d9673cc2bf3be_ApplyControlledOnBitString__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Callable*, %Array*, %Qubit* }*, { %Array*, %Callable*, %Array*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon___bb0a812d2b574442bf8d9673cc2bf3be_ApplyControlledOnBitString__ctl(%Array* %3, { %Array*, %Callable*, %Array*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___bb0a812d2b574442bf8d9673cc2bf3be_ApplyControlledOnBitString__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Callable*, %Array*, %Qubit* }*, { %Array*, %Callable*, %Array*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon___bb0a812d2b574442bf8d9673cc2bf3be_ApplyControlledOnBitString__ctladj(%Array* %3, { %Array*, %Callable*, %Array*, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__20__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { 
%Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__20__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___bb0a812d2b574442bf8d9673cc2bf3be_ApplyControlledOnBitString__body(%Array* %bits, %Callable* %oracle, %Array* %controlRegister, %Qubit* %targetRegister) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %2 = icmp sle i64 %0, %1 + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @23, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %3) + %4 = sub i64 %0, 1 + %5 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %4, 2 + %controlSubregister = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %5, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* 
%controlSubregister) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Qubit* }* + %9 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %8, i32 0, i32 1 + store %Array* %controlSubregister, %Array** %9, align 8 + store %Qubit* %targetRegister, %Qubit** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___bb0a812d2b574442bf8d9673cc2bf3be_ApplyControlledOnBitString__adj(%Array* %bits, %Callable* %oracle, %Array* %controlRegister, %Qubit* %targetRegister) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %2 = icmp sle i64 %0, %1 + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @23, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %3) + %4 = sub i64 %0, 1 + %5 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %4, 2 + %__qsVar0__controlSubregister__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %5, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void 
@__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Qubit* }* + %9 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %8, i32 0, i32 1 + store %Array* %__qsVar0__controlSubregister__, %Array** %9, align 8 + store %Qubit* %targetRegister, %Qubit** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___bb0a812d2b574442bf8d9673cc2bf3be_ApplyControlledOnBitString__ctl(%Array* %__controlQubits__, { %Array*, %Callable*, %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %bits = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %targetRegister = load %Qubit*, %Qubit** %4, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %7 = icmp sle i64 %5, %6 + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr 
inbounds ([47 x i8], [47 x i8]* @23, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %7, %String* %8) + %9 = sub i64 %5, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %controlSubregister = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %10, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Qubit* }* + %14 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 1 + store %Array* %controlSubregister, %Array** %14, align 8 + store %Qubit* %targetRegister, %Qubit** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { %Array*, %Qubit* }* }* + %18 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { %Array*, %Qubit* }* %13, { %Array*, %Qubit* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %16, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___bb0a812d2b574442bf8d9673cc2bf3be_ApplyControlledOnBitString__ctladj(%Array* %__controlQubits__, { %Array*, %Callable*, %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %bits = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %targetRegister = load %Qubit*, %Qubit** %4, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %7 = icmp sle i64 %5, %6 + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @23, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %7, %String* %8) + %9 = sub i64 %5, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %__qsVar0__controlSubregister__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %10, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_adjoint(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Qubit* }* + %14 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 1 + store %Array* %__qsVar0__controlSubregister__, %Array** %14, align 8 + store %Qubit* %targetRegister, %Qubit** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* null, 
i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { %Array*, %Qubit* }* }* + %18 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { %Array*, %Qubit* }* %13, { %Array*, %Qubit* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %16, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__44__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 1 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Array* }* getelementptr ({ %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Callable*, %Array*, %Array* }* + %12 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { 
%Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 3 + store %Array* %2, %Array** %12, align 8 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__44__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 1 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Array* }* getelementptr ({ %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Callable*, %Array*, %Array* }* + %12 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 3 + store %Array* %2, %Array** %12, align 8 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__44__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { 
%Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Array* }* getelementptr ({ %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Callable*, %Array*, %Array* }* + %16 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 3 + store %Array* %7, %Array** %16, align 8 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { %Array*, %Callable*, %Array*, %Array* }* %15, { %Array*, %Callable*, %Array*, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__44__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Array* }* getelementptr ({ %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Callable*, %Array*, %Array* }* + %16 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 3 + store %Array* %7, %Array** %16, align 8 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { %Array*, %Callable*, %Array*, %Array* }* %15, { %Array*, %Callable*, %Array*, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___868340d48aba4d3d9872cb8fd11efca4_ApplyControlledOnBitString__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %5 = load %Array*, %Array** %1, align 8 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___868340d48aba4d3d9872cb8fd11efca4_ApplyControlledOnBitString__body(%Array* %5, %Callable* %6, %Array* %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___868340d48aba4d3d9872cb8fd11efca4_ApplyControlledOnBitString__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %5 = load %Array*, %Array** %1, align 8 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___868340d48aba4d3d9872cb8fd11efca4_ApplyControlledOnBitString__adj(%Array* %5, %Callable* %6, %Array* %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___868340d48aba4d3d9872cb8fd11efca4_ApplyControlledOnBitString__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, 
%Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Callable*, %Array*, %Array* }*, { %Array*, %Callable*, %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___868340d48aba4d3d9872cb8fd11efca4_ApplyControlledOnBitString__ctl(%Array* %3, { %Array*, %Callable*, %Array*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___868340d48aba4d3d9872cb8fd11efca4_ApplyControlledOnBitString__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Callable*, %Array*, %Array* }*, { %Array*, %Callable*, %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___868340d48aba4d3d9872cb8fd11efca4_ApplyControlledOnBitString__ctladj(%Array* %3, { %Array*, %Callable*, %Array*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__21__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__21__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___868340d48aba4d3d9872cb8fd11efca4_ApplyControlledOnBitString__body(%Array* %bits, %Callable* %oracle, %Array* %controlRegister, %Array* %targetRegister) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %2 = icmp sle i64 %0, %1 + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @23, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %3) + %4 = sub i64 %0, 1 + %5 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %4, 2 + %controlSubregister = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %5, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %controlSubregister, %Array** %9, align 8 + store %Array* %targetRegister, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___868340d48aba4d3d9872cb8fd11efca4_ApplyControlledOnBitString__adj(%Array* %bits, %Callable* %oracle, %Array* %controlRegister, %Array* %targetRegister) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %2 = icmp sle i64 %0, %1 + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @23, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %3) + %4 = sub i64 %0, 1 + %5 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %4, 2 + %__qsVar0__controlSubregister__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %5, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %__qsVar0__controlSubregister__, %Array** %9, align 8 + store %Array* %targetRegister, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___868340d48aba4d3d9872cb8fd11efca4_ApplyControlledOnBitString__ctl(%Array* %__controlQubits__, { %Array*, %Callable*, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %bits = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %targetRegister = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %7 = icmp sle i64 %5, %6 + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @23, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %7, %String* %8) + %9 = sub i64 %5, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %controlSubregister = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %10, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %12 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Array* }* + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 1 + store %Array* %controlSubregister, %Array** %14, align 8 + store %Array* %targetRegister, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { %Array*, %Array* }* }* + %18 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { %Array*, %Array* }* %13, { %Array*, %Array* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %16, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___868340d48aba4d3d9872cb8fd11efca4_ApplyControlledOnBitString__ctladj(%Array* %__controlQubits__, { %Array*, %Callable*, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %bits = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, 
i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %targetRegister = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %7 = icmp sle i64 %5, %6 + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @23, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %7, %String* %8) + %9 = sub i64 %5, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %__qsVar0__controlSubregister__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %10, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_adjoint(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Array* }* + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 1 + store %Array* %__qsVar0__controlSubregister__, %Array** %14, align 8 + store %Array* %targetRegister, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { %Array*, %Array* }* }* + %18 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { %Array*, %Array* }* %13, { %Array*, %Array* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %16, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__45__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, { %Array* }* }* + %5 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__45__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, { %Array* }* }* + %5 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + 
store { %Array* }* %7, { %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Callable* @__quantum__rt__callable_copy(%Callable* %9, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %10) + call void @__quantum__rt__callable_invoke(%Callable* %10, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__45__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, { %Array* }* }* + %10 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store { %Array* }* %4, { %Array* }** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, { %Array* }* }* }* getelementptr ({ %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, { %Array* }* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, { %Array* }* }* %9, { %Array*, { %Array* }* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void 
@Lifted__PartialApplication__45__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, { %Array* }* }* + %10 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store { %Array* }* %4, { %Array* }** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, { %Array* }* }* }* getelementptr ({ %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, { %Array* }* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, { %Array* }* }* %9, { %Array*, { %Array* }* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @MemoryManagement__22__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__22__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___ae485b361e91482f9489aa577506f9b4___QsRef3__ApplyOperationRepeatedlyCA____body(%Callable* %op, i64 %power, %Array* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %0 = sub i64 %power, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxApplication = phi i64 [ 0, %entry ], [ %5, %exiting__1 ] + %1 = icmp sle i64 %idxApplication, %0 + br i1 %1, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %3 = bitcast %Tuple* %2 to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + store %Array* %target, %Array** %4, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %2, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %5 = add i64 %idxApplication, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___ae485b361e91482f9489aa577506f9b4___QsRef3__ApplyOperationRepeatedlyCA____adj(%Callable* %op, i64 %power, %Array* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %0 = sub i64 %power, 1 + %1 = sub i64 %0, 0 + %2 = sdiv i64 %1, 1 + %3 = mul i64 1, %2 + %4 = add i64 0, %3 + %5 = insertvalue %Range zeroinitializer, i64 %4, 0 + %6 = insertvalue %Range %5, i64 -1, 1 + %7 = insertvalue %Range %6, i64 0, 2 + %8 = extractvalue %Range %7, 0 + %9 = extractvalue %Range %7, 1 + %10 = extractvalue %Range %7, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %11 = icmp sgt i64 %9, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idxApplication__ = phi i64 [ %8, %preheader__1 ], [ %19, %exiting__1 ] + %12 = icmp sle i64 %__qsVar0__idxApplication__, %10 + %13 = 
icmp sge i64 %__qsVar0__idxApplication__, %10 + %14 = select i1 %11, i1 %12, i1 %13 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array* }* + %18 = getelementptr inbounds { %Array* }, { %Array* }* %17, i32 0, i32 0 + store %Array* %target, %Array** %18, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %16, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %__qsVar0__idxApplication__, %9 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___ae485b361e91482f9489aa577506f9b4___QsRef3__ApplyOperationRepeatedlyCA____ctl(%Array* %__controlQubits__, { %Callable*, i64, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0 + %op = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %2 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 1 + %power = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %4 = sub i64 %power, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxApplication = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %5 = icmp sle i64 %idxApplication, %4 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %9, align 8 + store 
%Array* %target, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %idxApplication, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___ae485b361e91482f9489aa577506f9b4___QsRef3__ApplyOperationRepeatedlyCA____ctladj(%Array* %__controlQubits__, { %Callable*, i64, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0 + %op = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %2 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 1 + %power = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %4 = sub i64 %power, 1 + %5 = sub i64 %4, 0 + %6 = sdiv i64 %5, 1 + %7 = mul i64 1, %6 + %8 = add i64 0, %7 + %9 = insertvalue %Range zeroinitializer, i64 %8, 0 + %10 = insertvalue %Range %9, i64 -1, 1 + %11 = insertvalue %Range %10, i64 0, 2 + %12 = extractvalue %Range %11, 0 + %13 = extractvalue %Range %11, 1 + %14 = extractvalue %Range %11, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %15 = icmp sgt i64 %13, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idxApplication__ = phi i64 [ %12, %preheader__1 ], [ %24, %exiting__1 ] + %16 = icmp sle i64 %__qsVar0__idxApplication__, %14 + %17 = icmp sge i64 %__qsVar0__idxApplication__, %14 + %18 = select i1 %15, i1 %16, i1 %17 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %19) + call void @__quantum__rt__callable_make_controlled(%Callable* %19) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, %Array* }* + %22 = getelementptr inbounds { 
%Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %22, align 8 + store %Array* %target, %Array** %23, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %19, %Tuple* %20, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %24 = add i64 %__qsVar0__idxApplication__, %13 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___5516aa15311e4ec2bc23553ec55e6745_BoundCA__body(%Array* %operations) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___8ecd591e14354dd1acb17a9a79b0fc4a___QsRef3__ApplyBoundCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %9 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %15, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %10) + %13 = bitcast i8* %12 to %Callable** + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %15 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %operations, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Callable*, %Array* }* + %18 = 
getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 1 + store %Callable* %8, %Callable** %18, align 8 + store %Array* %operations, %Array** %19, align 8 + %20 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__46__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__23__FunctionTable, %Tuple* %16) + %21 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %22 = phi i64 [ 0, %exit__2 ], [ %27, %exiting__3 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %22) + %25 = bitcast i8* %24 to %Callable** + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %27 = add i64 %22, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + ret %Callable* %20 +} + +define internal void @Lifted__PartialApplication__46__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__46__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, 
%Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__46__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__46__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* 
%arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8ecd591e14354dd1acb17a9a79b0fc4a___QsRef3__ApplyBoundCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Canon___8ecd591e14354dd1acb17a9a79b0fc4a___QsRef3__ApplyBoundCA____body(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8ecd591e14354dd1acb17a9a79b0fc4a___QsRef3__ApplyBoundCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { 
%Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Canon___8ecd591e14354dd1acb17a9a79b0fc4a___QsRef3__ApplyBoundCA____adj(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8ecd591e14354dd1acb17a9a79b0fc4a___QsRef3__ApplyBoundCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___8ecd591e14354dd1acb17a9a79b0fc4a___QsRef3__ApplyBoundCA____ctl(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8ecd591e14354dd1acb17a9a79b0fc4a___QsRef3__ApplyBoundCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___8ecd591e14354dd1acb17a9a79b0fc4a___QsRef3__ApplyBoundCA____ctladj(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__23__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Callable** + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret 
void +} + +define internal void @MemoryManagement__23__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Callable** + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %11, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___4535c5767a504f01baac901c4cee390f_DecomposedIntoTimeStepsCA__body({ i64, %Callable* }* %0, i64 %trotterOrder) { +entry: + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = icmp eq i64 %trotterOrder, 1 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Callable* }* getelementptr ({ %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, i64, %Callable* }* + %7 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %6, i32 0, i32 2 + store %Callable* %4, %Callable** %7, align 8 + store i64 %nSteps, i64* %8, align 4 + store %Callable* %op, %Callable** %9, align 8 + %10 = call %Callable* @__quantum__rt__callable_create([4 x void 
(%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__47__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__24__FunctionTable, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret %Callable* %10 + +test1__1: ; preds = %entry + %11 = icmp eq i64 %trotterOrder, 2 + br i1 %11, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Callable* }* getelementptr ({ %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Callable*, i64, %Callable* }* + %15 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %14, i32 0, i32 0 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %14, i32 0, i32 1 + %17 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %14, i32 0, i32 2 + store %Callable* %12, %Callable** %15, align 8 + store i64 %nSteps, i64* %16, align 4 + store %Callable* %op, %Callable** %17, align 8 + %18 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__48__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__24__FunctionTable, %Tuple* %13) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret %Callable* %18 + +test2__1: ; preds = %test1__1 + %19 = srem i64 %trotterOrder, 2 + %20 = icmp eq i64 %19, 0 + br i1 %20, label %then2__1, label %else__1 + +then2__1: ; preds = %test2__1 + %21 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, i64, %Callable* }* getelementptr ({ %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* null, i32 1) to i64)) + %23 = bitcast %Tuple* %22 to { %Callable*, i64, i64, %Callable* }* + %24 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %23, i32 0, i32 0 + %25 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %23, i32 0, i32 1 + %26 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %23, i32 0, i32 2 + %27 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %23, i32 0, i32 3 + store %Callable* %21, %Callable** %24, align 8 + store i64 %trotterOrder, i64* %25, align 4 + store i64 %nSteps, i64* %26, align 4 + store 
%Callable* %op, %Callable** %27, align 8 + %28 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__49__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__25__FunctionTable, %Tuple* %22) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret %Callable* %28 + +else__1: ; preds = %test2__1 + %29 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @24, i32 0, i32 0)) + %30 = call %String* @__quantum__rt__int_to_string(i64 %trotterOrder) + %31 = call %String* @__quantum__rt__string_concatenate(%String* %29, %String* %30) + call void @__quantum__rt__string_update_reference_count(%String* %29, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %30, i32 -1) + %32 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([20 x i8], [20 x i8]* @25, i32 0, i32 0)) + %33 = call %String* @__quantum__rt__string_concatenate(%String* %31, %String* %32) + call void @__quantum__rt__string_update_reference_count(%String* %31, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %32, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__fail(%String* %33) + unreachable + +continue__1: ; No predecessors! + unreachable +} + +define internal void @Lifted__PartialApplication__47__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Callable* }* + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + store i64 %2, i64* %7, align 4 + store %Callable* %4, %Callable** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { i64, %Callable* }*, double, %Array* }* + %16 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, 
%Array* }* %15, i32 0, i32 2 + store { i64, %Callable* }* %6, { i64, %Callable* }** %16, align 8 + store double %11, double* %17, align 8 + store %Array* %13, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %14, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__47__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Callable* }* + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + store i64 %2, i64* %7, align 4 + store %Callable* %4, %Callable** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { i64, %Callable* }*, double, %Array* }* + %16 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 2 + store { i64, %Callable* }* %6, { i64, %Callable* }** %16, align 8 + store double %11, double* %17, align 8 + store %Array* %13, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %14, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__47__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable* }* + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + store i64 %7, i64* %12, align 4 + store %Callable* %9, %Callable** %13, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %15 = load double, double* %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { { i64, %Callable* }*, double, %Array* }* + %20 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 2 + store { i64, %Callable* }* %11, { i64, %Callable* }** %20, align 8 + store double %15, double* %21, align 8 + store %Array* %17, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { { i64, %Callable* }*, double, %Array* }* %19, { { i64, %Callable* }*, double, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, i64, %Callable* }, { 
%Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__47__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable* }* + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + store i64 %7, i64* %12, align 4 + store %Callable* %9, %Callable** %13, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %15 = load double, double* %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { { i64, %Callable* }*, double, %Array* }* + %20 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 2 + store { i64, %Callable* }* %11, { i64, %Callable* }** %20, align 8 + store double %15, double* %21, align 8 + store %Array* %17, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { { i64, 
%Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { { i64, %Callable* }*, double, %Array* }* %19, { { i64, %Callable* }*, double, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %29) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____body({ i64, %Callable* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void 
@Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____adj({ i64, %Callable* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, double, %Array* }*, { { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____ctl(%Array* %3, { { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, double, %Array* }*, { { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____ctladj(%Array* %3, { { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__24__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__24__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__48__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Callable* }* + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + store i64 %2, i64* %7, align 4 + store %Callable* %4, %Callable** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { i64, %Callable* }*, double, %Array* }* + %16 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 2 + store { i64, %Callable* }* %6, { i64, %Callable* }** %16, align 8 + store double %11, double* %17, align 8 + store %Array* %13, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %14, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__48__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, 
i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Callable* }* + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + store i64 %2, i64* %7, align 4 + store %Callable* %4, %Callable** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { i64, %Callable* }*, double, %Array* }* + %16 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 2 + store { i64, %Callable* }* %6, { i64, %Callable* }** %16, align 8 + store double %11, double* %17, align 8 + store %Array* %13, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %14, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__48__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call 
%Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable* }* + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + store i64 %7, i64* %12, align 4 + store %Callable* %9, %Callable** %13, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %15 = load double, double* %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { { i64, %Callable* }*, double, %Array* }* + %20 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 2 + store { i64, %Callable* }* %11, { i64, %Callable* }** %20, align 8 + store double %15, double* %21, align 8 + store %Array* %17, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { { i64, %Callable* }*, double, %Array* }* %19, { { i64, %Callable* }*, double, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__48__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, 
%Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable* }* + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + store i64 %7, i64* %12, align 4 + store %Callable* %9, %Callable** %13, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %15 = load double, double* %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { { i64, %Callable* }*, double, %Array* }* + %20 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 2 + store { i64, %Callable* }* %11, { i64, %Callable* }** %20, align 8 + store double %15, double* %21, align 8 + store %Array* %17, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { { i64, %Callable* }*, double, %Array* }* %19, { { i64, %Callable* }*, double, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void 
@__quantum__rt__callable_make_adjoint(%Callable* %29) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____body({ i64, %Callable* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____adj({ i64, %Callable* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, double, %Array* }*, { { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____ctl(%Array* %3, { { i64, %Callable* }*, 
double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, double, %Array* }*, { { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____ctladj(%Array* %3, { { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @Lifted__PartialApplication__49__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 3 + %6 = load %Callable*, %Callable** %5, align 8 + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { i64, %Callable* }* + %9 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %8, i32 0, i32 1 + store i64 %4, i64* %9, align 4 + store %Callable* %6, %Callable** %10, align 8 + %11 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %11, i32 0, i32 0 + %13 = load double, double* %12, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %11, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { i64, { i64, %Callable* }*, double, %Array* }* + %18 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 3 + store i64 %2, i64* %18, align 4 + store { i64, %Callable* }* %8, { i64, %Callable* }** %19, align 8 + store double %13, double* %20, align 8 
+ store %Array* %15, %Array** %21, align 8 + %22 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %16, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__49__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 3 + %6 = load %Callable*, %Callable** %5, align 8 + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { i64, %Callable* }* + %9 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %8, i32 0, i32 1 + store i64 %4, i64* %9, align 4 + store %Callable* %6, %Callable** %10, align 8 + %11 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %11, i32 0, i32 0 + %13 = load double, double* %12, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %11, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { i64, { i64, %Callable* }*, double, %Array* }* + %18 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 3 + store i64 %2, i64* %18, align 4 + store { i64, %Callable* }* %8, { i64, %Callable* }** %19, align 8 + store double %13, double* %20, align 8 + store %Array* %15, %Array** %21, align 8 + %22 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* 
%16, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__49__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 3 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, %Callable* }* + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1 + store i64 %9, i64* %14, align 4 + store %Callable* %11, %Callable** %15, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %17 = load double, double* %16, align 8 + %18 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %19 = load %Array*, %Array** %18, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { i64, { i64, %Callable* }*, double, %Array* }* + %22 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 3 + store i64 %7, i64* %22, align 4 + store { i64, %Callable* }* %13, { i64, %Callable* }** %23, align 8 + store double %17, double* %24, align 8 + store %Array* %19, %Array** %25, align 8 + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { 
i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* + %28 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %27, i32 0, i32 1 + store %Array* %3, %Array** %28, align 8 + store { i64, { i64, %Callable* }*, double, %Array* }* %21, { i64, { i64, %Callable* }*, double, %Array* }** %29, align 8 + %30 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 0 + %31 = load %Callable*, %Callable** %30, align 8 + %32 = call %Callable* @__quantum__rt__callable_copy(%Callable* %31, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %32) + call void @__quantum__rt__callable_invoke(%Callable* %32, %Tuple* %26, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %32, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__49__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 3 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, %Callable* }* + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1 + store i64 %9, i64* %14, align 4 + store %Callable* %11, %Callable** %15, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %17 = load double, double* %16, align 8 + %18 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %19 = load %Array*, %Array** %18, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* 
}*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { i64, { i64, %Callable* }*, double, %Array* }* + %22 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 3 + store i64 %7, i64* %22, align 4 + store { i64, %Callable* }* %13, { i64, %Callable* }** %23, align 8 + store double %17, double* %24, align 8 + store %Array* %19, %Array** %25, align 8 + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* + %28 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %27, i32 0, i32 1 + store %Array* %3, %Array** %28, align 8 + store { i64, { i64, %Callable* }*, double, %Array* }* %21, { i64, { i64, %Callable* }*, double, %Array* }** %29, align 8 + %30 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 0 + %31 = load %Callable*, %Callable** %30, align 8 + %32 = call %Callable* @__quantum__rt__callable_copy(%Callable* %31, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %32) + call void @__quantum__rt__callable_make_controlled(%Callable* %32) + call void @__quantum__rt__callable_invoke(%Callable* %32, %Tuple* %26, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %32, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, 
%Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %7 = load double, double* %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____body(i64 %5, { i64, %Callable* }* %6, double %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %7 = load double, double* %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____adj(i64 %5, { i64, %Callable* }* %6, double %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, { i64, %Callable* }*, double, %Array* }*, { i64, { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____ctl(%Array* %3, { i64, { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, { i64, %Callable* }*, 
double, %Array* }*, { i64, { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____ctladj(%Array* %3, { i64, { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__25__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 3 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__25__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 3 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____body(i64 %order, { i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = icmp sgt i64 %order, 2 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %stepSizeOuter = call double @Microsoft__Quantum__Canon____QsRef3__TrotterStepSize____body(i64 %order) + %4 = fmul double 4.000000e+00, %stepSizeOuter + %stepSizeInner = fsub double 1.000000e+00, %4 + %5 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 
ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { i64, %Callable* }* + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %7, i32 0, i32 1 + store i64 %nSteps, i64* %8, align 4 + store %Callable* %op, %Callable** %9, align 8 + %10 = fmul double %stepSizeOuter, %stepSize + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____body(i64 %5, { i64, %Callable* }* %7, double %10, %Array* %target) + %11 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, %Callable* }* + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1 + store i64 %nSteps, i64* %14, align 4 + store %Callable* %op, %Callable** %15, align 8 + %16 = fmul double %stepSizeOuter, %stepSize + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____body(i64 %11, { i64, %Callable* }* %13, double %16, %Array* %target) + %17 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { i64, %Callable* }* + %20 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %19, i32 0, i32 1 + store i64 %nSteps, i64* %20, align 4 + store %Callable* %op, %Callable** %21, align 8 + %22 = fmul double %stepSizeInner, %stepSize + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____body(i64 %17, { i64, %Callable* }* %19, double %22, %Array* %target) + %23 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { i64, %Callable* }* + %26 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %25, i32 0, i32 1 + store i64 %nSteps, i64* %26, align 4 + store %Callable* %op, %Callable** %27, align 8 + %28 = fmul double %stepSizeOuter, %stepSize + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____body(i64 %23, { i64, %Callable* }* %25, double %28, %Array* %target) + %29 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %30 = 
call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %31 = bitcast %Tuple* %30 to { i64, %Callable* }* + %32 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %31, i32 0, i32 0 + %33 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %31, i32 0, i32 1 + store i64 %nSteps, i64* %32, align 4 + store %Callable* %op, %Callable** %33, align 8 + %34 = fmul double %stepSizeOuter, %stepSize + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____body(i64 %29, { i64, %Callable* }* %31, double %34, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %35 = icmp eq i64 %order, 2 + br i1 %35, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { i64, %Callable* }* + %38 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %37, i32 0, i32 1 + store i64 %nSteps, i64* %38, align 4 + store %Callable* %op, %Callable** %39, align 8 + call void @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____body({ i64, %Callable* }* %37, double %stepSize, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %continue__1 + +else__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %41 = bitcast 
%Tuple* %40 to { i64, %Callable* }* + %42 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %41, i32 0, i32 1 + store i64 %nSteps, i64* %42, align 4 + store %Callable* %op, %Callable** %43, align 8 + call void @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____body({ i64, %Callable* }* %41, double %stepSize, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____adj(i64 %order, { i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = icmp sgt i64 %order, 2 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %__qsVar0__stepSizeOuter__ = call double @Microsoft__Quantum__Canon____QsRef3__TrotterStepSize____body(i64 %order) + %4 = fmul double 4.000000e+00, %__qsVar0__stepSizeOuter__ + %__qsVar1__stepSizeInner__ = fsub double 1.000000e+00, %4 + %5 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { i64, %Callable* }* + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %7, i32 0, i32 1 + store i64 %nSteps, i64* %8, align 4 + store %Callable* %op, %Callable** %9, align 8 + %10 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____adj(i64 %5, { i64, %Callable* }* %7, double %10, %Array* %target) + %11 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, %Callable* }* + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, %Callable* }, { 
i64, %Callable* }* %13, i32 0, i32 1 + store i64 %nSteps, i64* %14, align 4 + store %Callable* %op, %Callable** %15, align 8 + %16 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____adj(i64 %11, { i64, %Callable* }* %13, double %16, %Array* %target) + %17 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { i64, %Callable* }* + %20 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %19, i32 0, i32 1 + store i64 %nSteps, i64* %20, align 4 + store %Callable* %op, %Callable** %21, align 8 + %22 = fmul double %__qsVar1__stepSizeInner__, %stepSize + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____adj(i64 %17, { i64, %Callable* }* %19, double %22, %Array* %target) + %23 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { i64, %Callable* }* + %26 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %25, i32 0, i32 1 + store i64 %nSteps, i64* %26, align 4 + store %Callable* %op, %Callable** %27, align 8 + %28 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____adj(i64 %23, { i64, %Callable* }* %25, double %28, %Array* %target) + %29 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %31 = bitcast %Tuple* %30 to { i64, %Callable* }* + %32 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %31, i32 0, i32 0 + %33 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %31, i32 0, i32 1 + store i64 %nSteps, i64* %32, align 4 + store %Callable* %op, %Callable** %33, align 8 + %34 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____adj(i64 %29, { i64, %Callable* }* %31, double %34, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %35 = icmp eq i64 %order, 2 + br i1 %35, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { i64, %Callable* }* + %38 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %37, i32 0, i32 1 + store i64 %nSteps, i64* %38, align 4 + store %Callable* %op, %Callable** %39, align 8 + call void @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____adj({ i64, %Callable* }* %37, double %stepSize, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %continue__1 + +else__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i64, %Callable* }* + %42 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %41, i32 0, i32 1 + store i64 %nSteps, i64* %42, align 4 + store %Callable* %op, %Callable** %43, align 8 + call void @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____adj({ i64, %Callable* }* %41, double %stepSize, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %order = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %4 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %stepSize = load double, double* %4, align 8 + %5 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 3 + %target = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %3, i32 0, i32 0 + %nSteps = load i64, i64* %6, align 4 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %3, i32 0, i32 1 + %op = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %8 = icmp sgt i64 %order, 2 + br i1 %8, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %stepSizeOuter = call double @Microsoft__Quantum__Canon____QsRef3__TrotterStepSize____body(i64 %order) + %9 = fmul double 4.000000e+00, %stepSizeOuter + %stepSizeInner = fsub double 1.000000e+00, %9 + %10 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i64, %Callable* }* + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 1 + store i64 %nSteps, i64* %13, align 4 + store %Callable* %op, %Callable** %14, align 8 + %15 = fmul double %stepSizeOuter, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { i64, { i64, %Callable* }*, double, %Array* }* + %18 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, 
i32 0, i32 3 + store i64 %10, i64* %18, align 4 + store { i64, %Callable* }* %12, { i64, %Callable* }** %19, align 8 + store double %15, double* %20, align 8 + store %Array* %target, %Array** %21, align 8 + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %17) + %22 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { i64, %Callable* }* + %25 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 1 + store i64 %nSteps, i64* %25, align 4 + store %Callable* %op, %Callable** %26, align 8 + %27 = fmul double %stepSizeOuter, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { i64, { i64, %Callable* }*, double, %Array* }* + %30 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 1 + %32 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 2 + %33 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 3 + store i64 %22, i64* %30, align 4 + store { i64, %Callable* }* %24, { i64, %Callable* }** %31, align 8 + store double %27, double* %32, align 8 + store %Array* %target, %Array** %33, align 8 + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %29) + %34 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %35 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %36 = bitcast %Tuple* %35 to { i64, %Callable* }* + %37 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %36, i32 0, i32 0 + %38 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %36, i32 0, i32 1 + store i64 %nSteps, i64* %37, align 4 + store %Callable* %op, %Callable** %38, align 8 + %39 = fmul double %stepSizeInner, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %41 = 
bitcast %Tuple* %40 to { i64, { i64, %Callable* }*, double, %Array* }* + %42 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 1 + %44 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 2 + %45 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 3 + store i64 %34, i64* %42, align 4 + store { i64, %Callable* }* %36, { i64, %Callable* }** %43, align 8 + store double %39, double* %44, align 8 + store %Array* %target, %Array** %45, align 8 + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %41) + %46 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { i64, %Callable* }* + %49 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %48, i32 0, i32 1 + store i64 %nSteps, i64* %49, align 4 + store %Callable* %op, %Callable** %50, align 8 + %51 = fmul double %stepSizeOuter, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %52 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %53 = bitcast %Tuple* %52 to { i64, { i64, %Callable* }*, double, %Array* }* + %54 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 0 + %55 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 1 + %56 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 2 + %57 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 3 + store i64 %46, i64* %54, align 4 + store { i64, %Callable* }* %48, { i64, %Callable* }** %55, align 8 + store double %51, double* %56, align 8 + store %Array* %target, %Array** %57, align 8 + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %53) + %58 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %59 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %60 = bitcast %Tuple* %59 to { i64, %Callable* }* + %61 = 
getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %60, i32 0, i32 0 + %62 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %60, i32 0, i32 1 + store i64 %nSteps, i64* %61, align 4 + store %Callable* %op, %Callable** %62, align 8 + %63 = fmul double %stepSizeOuter, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %64 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %65 = bitcast %Tuple* %64 to { i64, { i64, %Callable* }*, double, %Array* }* + %66 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 0 + %67 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 1 + %68 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 2 + %69 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 3 + store i64 %58, i64* %66, align 4 + store { i64, %Callable* }* %60, { i64, %Callable* }** %67, align 8 + store double %63, double* %68, align 8 + store %Array* %target, %Array** %69, align 8 + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %65) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %59, i32 -1) + call 
void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %64, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %70 = icmp eq i64 %order, 2 + br i1 %70, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %71 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %72 = bitcast %Tuple* %71 to { i64, %Callable* }* + %73 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %72, i32 0, i32 0 + %74 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %72, i32 0, i32 1 + store i64 %nSteps, i64* %73, align 4 + store %Callable* %op, %Callable** %74, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %75 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %76 = bitcast %Tuple* %75 to { { i64, %Callable* }*, double, %Array* }* + %77 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 0 + %78 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 1 + %79 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 2 + store { i64, %Callable* }* %72, { i64, %Callable* }** %77, align 8 + store double %stepSize, double* %78, align 8 + store %Array* %target, %Array** %79, align 8 + call void @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %76) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %71, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %75, i32 -1) + br label %continue__1 + +else__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %80 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %81 = bitcast %Tuple* %80 to { i64, %Callable* }* + %82 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %81, i32 0, i32 0 + %83 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %81, i32 0, i32 1 + store i64 %nSteps, i64* %82, align 4 + store %Callable* %op, %Callable** %83, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %84 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %85 = bitcast %Tuple* %84 to 
{ { i64, %Callable* }*, double, %Array* }* + %86 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 0 + %87 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 1 + %88 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 2 + store { i64, %Callable* }* %81, { i64, %Callable* }** %86, align 8 + store double %stepSize, double* %87, align 8 + store %Array* %target, %Array** %88, align 8 + call void @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %85) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %order = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %4 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %stepSize = load double, double* %4, align 8 + %5 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 3 + %target = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %3, i32 0, i32 0 + %nSteps = load i64, i64* %6, align 4 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %3, i32 0, i32 1 + %op = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %8 = icmp sgt i64 %order, 2 + br i1 %8, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %__qsVar0__stepSizeOuter__ = call double @Microsoft__Quantum__Canon____QsRef3__TrotterStepSize____body(i64 %order) + %9 = fmul double 4.000000e+00, %__qsVar0__stepSizeOuter__ + %__qsVar1__stepSizeInner__ = fsub double 1.000000e+00, %9 + %10 = sub 
i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i64, %Callable* }* + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 1 + store i64 %nSteps, i64* %13, align 4 + store %Callable* %op, %Callable** %14, align 8 + %15 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { i64, { i64, %Callable* }*, double, %Array* }* + %18 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 3 + store i64 %10, i64* %18, align 4 + store { i64, %Callable* }* %12, { i64, %Callable* }** %19, align 8 + store double %15, double* %20, align 8 + store %Array* %target, %Array** %21, align 8 + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %17) + %22 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { i64, %Callable* }* + %25 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 1 + store i64 %nSteps, i64* %25, align 4 + store %Callable* %op, %Callable** %26, align 8 + %27 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { i64, { i64, %Callable* }*, double, %Array* }* + %30 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 1 + %32 = getelementptr 
inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 2 + %33 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 3 + store i64 %22, i64* %30, align 4 + store { i64, %Callable* }* %24, { i64, %Callable* }** %31, align 8 + store double %27, double* %32, align 8 + store %Array* %target, %Array** %33, align 8 + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %29) + %34 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %35 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %36 = bitcast %Tuple* %35 to { i64, %Callable* }* + %37 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %36, i32 0, i32 0 + %38 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %36, i32 0, i32 1 + store i64 %nSteps, i64* %37, align 4 + store %Callable* %op, %Callable** %38, align 8 + %39 = fmul double %__qsVar1__stepSizeInner__, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i64, { i64, %Callable* }*, double, %Array* }* + %42 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 1 + %44 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 2 + %45 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 3 + store i64 %34, i64* %42, align 4 + store { i64, %Callable* }* %36, { i64, %Callable* }** %43, align 8 + store double %39, double* %44, align 8 + store %Array* %target, %Array** %45, align 8 + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %41) + %46 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { i64, %Callable* }* + %49 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %48, i32 0, i32 1 + store i64 %nSteps, i64* %49, align 4 + store %Callable* %op, %Callable** %50, align 8 + %51 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void 
@__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %52 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %53 = bitcast %Tuple* %52 to { i64, { i64, %Callable* }*, double, %Array* }* + %54 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 0 + %55 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 1 + %56 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 2 + %57 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 3 + store i64 %46, i64* %54, align 4 + store { i64, %Callable* }* %48, { i64, %Callable* }** %55, align 8 + store double %51, double* %56, align 8 + store %Array* %target, %Array** %57, align 8 + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %53) + %58 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %59 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %60 = bitcast %Tuple* %59 to { i64, %Callable* }* + %61 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %60, i32 0, i32 0 + %62 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %60, i32 0, i32 1 + store i64 %nSteps, i64* %61, align 4 + store %Callable* %op, %Callable** %62, align 8 + %63 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %64 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %65 = bitcast %Tuple* %64 to { i64, { i64, %Callable* }*, double, %Array* }* + %66 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 0 + %67 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 1 + %68 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 2 + %69 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 3 + store i64 %58, i64* %66, align 4 + store { i64, %Callable* }* %60, { i64, %Callable* }** %67, align 8 + store double %63, double* %68, align 8 + store %Array* %target, %Array** %69, align 8 + call void @Microsoft__Quantum__Canon___61f43532250345dc97f6e281c5067865___QsRef3__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %65) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 
-1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %59, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %64, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %70 = icmp eq i64 %order, 2 + br i1 %70, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %71 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %72 = bitcast %Tuple* %71 to { i64, %Callable* }* + %73 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %72, i32 0, i32 0 + %74 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %72, i32 0, i32 1 + store i64 %nSteps, i64* %73, align 4 + store %Callable* %op, %Callable** %74, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %75 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %76 = bitcast %Tuple* %75 to { { i64, %Callable* }*, double, %Array* }* + %77 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 0 + %78 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 1 + %79 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 2 + 
store { i64, %Callable* }* %72, { i64, %Callable* }** %77, align 8 + store double %stepSize, double* %78, align 8 + store %Array* %target, %Array** %79, align 8 + call void @Microsoft__Quantum__Canon___312118ce527e450dad7256d305dcd726___QsRef3__Trotter2ImplCA____ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %76) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %71, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %75, i32 -1) + br label %continue__1 + +else__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %80 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %81 = bitcast %Tuple* %80 to { i64, %Callable* }* + %82 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %81, i32 0, i32 0 + %83 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %81, i32 0, i32 1 + store i64 %nSteps, i64* %82, align 4 + store %Callable* %op, %Callable** %83, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %84 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %85 = bitcast %Tuple* %84 to { { i64, %Callable* }*, double, %Array* }* + %86 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 0 + %87 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 1 + %88 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 2 + store { i64, %Callable* }* %81, { i64, %Callable* }** %86, align 8 + store double %stepSize, double* %87, align 8 + store %Array* %target, %Array** %88, align 8 + call void @Microsoft__Quantum__Canon___f3d95047788745f3a01c09cc21f0c3ec___QsRef3__Trotter1ImplCA____ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %85) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal { double, double }* 
@Microsoft__Quantum__Canon___92b2fc57a79541c8b9df7a25eea41fad___QsRef3__ComposedOutput____body(%Callable* %outer, %Callable* %inner, double %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outer, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outer, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inner, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inner, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { double }* + %2 = getelementptr inbounds { double }, { double }* %1, i32 0, i32 0 + store double %target, double* %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %inner, %Tuple* %0, %Tuple* %3) + %4 = bitcast %Tuple* %3 to { double }* + %5 = getelementptr inbounds { double }, { double }* %4, i32 0, i32 0 + %6 = load double, double* %5, align 8 + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { double }* + %9 = getelementptr inbounds { double }, { double }* %8, i32 0, i32 0 + store double %6, double* %9, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }* }* getelementptr ({ { double, double }* }, { { double, double }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %outer, %Tuple* %7, %Tuple* %10) + %11 = bitcast %Tuple* %10 to { { double, double }* }* + %12 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %11, i32 0, i32 0 + %13 = load { double, double }*, { double, double }** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %outer, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outer, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inner, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inner, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret { double, double }* %13 +} + +define internal %Callable* @Microsoft__Quantum__Canon___f695a1c4a84a4c22814bb23e1fd09776_Compose__body(%Callable* %outer, %Callable* %inner) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outer, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outer, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inner, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inner, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___92b2fc57a79541c8b9df7a25eea41fad___QsRef3__ComposedOutput____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %outer, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %outer, i32 1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %inner, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %inner, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Callable*, %Callable* }* + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store %Callable* %outer, %Callable** %4, align 8 + store %Callable* %inner, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__50__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__26__FunctionTable, %Tuple* %1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outer, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outer, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inner, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inner, i32 -1) + ret %Callable* %6 +} + +define internal void @Lifted__PartialApplication__50__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { double }* + %6 = getelementptr inbounds { double }, { double }* %5, i32 0, i32 0 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, double }* getelementptr ({ %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Callable*, double }* + %10 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %9, i32 0, i32 2 + store %Callable* %2, %Callable** %10, align 8 + store %Callable* %4, %Callable** %11, align 8 + store double %7, double* %12, align 8 + %13 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___92b2fc57a79541c8b9df7a25eea41fad___QsRef3__ComposedOutput____body__wrapper(%Tuple* 
%capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %1, align 8 + %5 = load %Callable*, %Callable** %2, align 8 + %6 = load double, double* %3, align 8 + %7 = call { double, double }* @Microsoft__Quantum__Canon___92b2fc57a79541c8b9df7a25eea41fad___QsRef3__ComposedOutput____body(%Callable* %4, %Callable* %5, double %6) + %8 = bitcast %Tuple* %result-tuple to { { double, double }* }* + %9 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %8, i32 0, i32 0 + store { double, double }* %7, { double, double }** %9, align 8 + ret void +} + +define internal void @MemoryManagement__26__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__26__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___0f498e1ece294d4db89965060edcd2ac_ApplyWithCA__body(%Callable* %outerOperation, %Callable* %innerOperation, { %Array* }* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %target, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %target to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__callable_invoke(%Callable* %outerOperation, %Tuple* %2, %Tuple* null) + call void @__quantum__rt__callable_invoke(%Callable* %innerOperation, %Tuple* %2, %Tuple* null) + %3 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %3) + call void @__quantum__rt__callable_invoke(%Callable* %3, %Tuple* %2, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___0f498e1ece294d4db89965060edcd2ac_ApplyWithCA__adj(%Callable* %outerOperation, %Callable* %innerOperation, { %Array* }* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %target, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %target to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %3) + call void @__quantum__rt__callable_make_adjoint(%Callable* %3) + call void 
@__quantum__rt__callable_invoke(%Callable* %3, %Tuple* %2, %Tuple* null) + %4 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %2, %Tuple* null) + %5 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %5, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %5) + call void @__quantum__rt__callable_invoke(%Callable* %5, %Tuple* %2, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %5, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___0f498e1ece294d4db89965060edcd2ac_ApplyWithCA__ctl(%Array* %controlRegister, { %Callable*, %Callable*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }* }, { %Callable*, %Callable*, { %Array* }* }* %0, i32 0, i32 0 + %outerOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }* }, { %Callable*, %Callable*, { %Array* }* }* %0, i32 0, i32 1 + %innerOperation = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %3 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }* }, { %Callable*, %Callable*, { %Array* }* }* %0, i32 0, i32 2 + %target = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %target, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array* }* %target to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__callable_invoke(%Callable* %outerOperation, %Tuple* %6, %Tuple* null) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void 
@__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, { %Array* }* }* + %10 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store { %Array* }* %target, { %Array* }** %11, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %8, %Tuple* null) + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___0f498e1ece294d4db89965060edcd2ac_ApplyWithCA__ctladj(%Array* %controlRegister, { %Callable*, %Callable*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }* }, { %Callable*, %Callable*, { %Array* }* }* %0, i32 0, i32 0 + %outerOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }* }, { %Callable*, %Callable*, { %Array* }* }* %0, i32 0, i32 1 + %innerOperation = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %3 
= getelementptr inbounds { %Callable*, %Callable*, { %Array* }* }, { %Callable*, %Callable*, { %Array* }* }* %0, i32 0, i32 2 + %target = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %target, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array* }* %target to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %7) + call void @__quantum__rt__callable_make_adjoint(%Callable* %7) + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %6, %Tuple* null) + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %8) + call void @__quantum__rt__callable_make_adjoint(%Callable* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Array*, { %Array* }* }* + %11 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %10, i32 0, i32 1 + store %Array* %controlRegister, %Array** %11, align 8 + store { %Array* }* %target, { %Array* }** %12, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %9, %Tuple* null) + %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %13) + call void @__quantum__rt__callable_invoke(%Callable* %13, %Tuple* %6, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertQubit__body(%Result* %expected, %Qubit* %q) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to i2* + store i2 -2, i2* %2, align 1 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %q, %Qubit** %5, align 8 + %6 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @26, i32 0, i32 0)) + %7 = call %String* @__quantum__rt__result_to_string(%Result* %expected) + %8 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %7) + call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %0, %Array* %3, %Result* %expected, %String* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + ret void +} + +declare %Result* @__quantum__rt__result_get_zero() + +define internal void @Microsoft__Quantum__Diagnostics__AssertAllZero__adj(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertAllZero__body(%Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertAllZero__ctl(%Array* %ctrls, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertAllZero__body(%Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertAllZero__ctladj(%Array* %__controlQubits__, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertAllZero__ctl(%Array* %__controlQubits__, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %bases, %Array* %qubits, %Result* %result, %String* %msg) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double 1.000000e+00, %String* %msg, double 1.000000e-10) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__adj(%Array* %bases, %Array* %qubits, %Result* %result, %String* %msg) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %bases, %Array* %qubits, %Result* %result, %String* %msg) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctl(%Array* %controllingQubits, { %Array*, %Array*, %Result*, %String* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controllingQubits, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 3 + %msg = load %String*, %String** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controllingQubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctladj(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 2 + %result = load %Result*, 
%Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 3 + %msg = load %String*, %String** %4, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, %String* }* getelementptr ({ %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Array*, %Array*, %Result*, %String* }* + %7 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 2 + %10 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 3 + store %Array* %bases, %Array** %7, align 8 + store %Array* %qubits, %Array** %8, align 8 + store %Result* %result, %Result** %9, align 8 + store %String* %msg, %String** %10, align 8 + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %6) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__body(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__adj(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) + call void 
@__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 3 + %prob = load double, double* %4, align 8 + %5 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 4 + %msg = load %String*, %String** %5, align 8 + %6 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 5 + %tolerance = load double, double* %6, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, double, %String*, double }* getelementptr ({ %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array*, %Result*, double, %String*, double }* + %9 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 2 + %12 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 3 + %13 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 4 + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 5 + store %Array* %bases, %Array** %9, 
align 8 + store %Array* %qubits, %Array** %10, align 8 + store %Result* %result, %Result** %11, align 8 + store double %prob, double* %12, align 8 + store %String* %msg, %String** %13, align 8 + store double %tolerance, double* %14, align 8 + call void @__quantum__qis__assertmeasurementprobability__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %8) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +declare void @__quantum__qis__assertmeasurementprobability__ctl(%Array*, { %Array*, %Array*, %Result*, double, %String*, double }*) + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__ctladj(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 3 + %prob = load double, double* %4, align 8 + %5 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 4 + %msg = load %String*, %String** %5, align 8 + %6 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 5 + %tolerance = load double, double* %6, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, double, %String*, double }* getelementptr ({ %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array*, %Result*, double, 
%String*, double }* + %9 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 2 + %12 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 3 + %13 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 4 + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 5 + store %Array* %bases, %Array** %9, align 8 + store %Array* %qubits, %Array** %10, align 8 + store %Result* %result, %Result** %11, align 8 + store double %prob, double* %12, align 8 + store %String* %msg, %String** %13, align 8 + store double %tolerance, double* %14, align 8 + call void @__quantum__qis__assertmeasurementprobability__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %8) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +declare %String* @__quantum__rt__result_to_string(%Result*) + +define internal void @Microsoft__Quantum__Diagnostics__AssertQubit__adj(%Result* %expected, %Qubit* %q) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to i2* + store i2 -2, i2* %2, align 1 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %q, %Qubit** %5, align 8 + %6 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @26, i32 0, i32 0)) + %7 = call %String* @__quantum__rt__result_to_string(%Result* %expected) + %8 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %7) + call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__adj(%Array* %0, %Array* %3, %Result* %expected, %String* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Diagnostics__AssertQubit__ctl(%Array* %__controlQubits__, { %Result*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Result*, %Qubit* }, { %Result*, %Qubit* }* %0, i32 0, i32 0 + %expected = load %Result*, %Result** %1, align 8 + %2 = getelementptr inbounds { %Result*, %Qubit* }, { %Result*, %Qubit* }* %0, i32 0, i32 1 + %q = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to i2* + store i2 -2, i2* %5, align 1 + %6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 0) + %8 = bitcast i8* %7 to %Qubit** + store %Qubit* %q, %Qubit** %8, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %expected, i32 1) + %9 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @26, i32 0, i32 0)) + %10 = call %String* @__quantum__rt__result_to_string(%Result* %expected) + %11 = call %String* @__quantum__rt__string_concatenate(%String* %9, %String* %10) + call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, %String* }* getelementptr ({ %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Array*, %Result*, %String* }* + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 3 + store %Array* %3, %Array** %14, align 8 + store %Array* %6, %Array** %15, align 8 + store %Result* %expected, %Result** %16, align 8 + store %String* %11, %String** %17, align 8 + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %13) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %expected, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertQubit__ctladj(%Array* %__controlQubits__, { %Result*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Result*, %Qubit* }, { %Result*, %Qubit* }* %0, i32 0, i32 0 + %expected = load %Result*, %Result** %1, align 8 + %2 = getelementptr inbounds { %Result*, %Qubit* }, { %Result*, %Qubit* }* %0, i32 0, i32 1 + 
%q = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to i2* + store i2 -2, i2* %5, align 1 + %6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 0) + %8 = bitcast i8* %7 to %Qubit** + store %Qubit* %q, %Qubit** %8, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %expected, i32 1) + %9 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @26, i32 0, i32 0)) + %10 = call %String* @__quantum__rt__result_to_string(%Result* %expected) + %11 = call %String* @__quantum__rt__string_concatenate(%String* %9, %String* %10) + call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, %String* }* getelementptr ({ %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Array*, %Result*, %String* }* + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 3 + store %Array* %3, %Array** %14, align 8 + store %Array* %6, %Array** %15, align 8 + store %Result* %expected, %Result** %16, align 8 + store %String* %11, %String** %17, align 8 + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctladj(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %13) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %expected, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %actual, i1 %expected, %String* %message) { +entry: + %0 = icmp ne i1 %actual, %expected + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Diagnostics___93054858c55a4d069c8a89cbd644c719___QsRef3__FormattedFailure____body(i1 %actual, i1 %expected, %String* %message) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics___93054858c55a4d069c8a89cbd644c719___QsRef3__FormattedFailure____body(i1 %actual, i1 %expected, %String* %message) { +entry: + %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @27, i32 0, i32 0)) + %1 = call %String* @__quantum__rt__string_concatenate(%String* %0, %String* %message) + %2 = call %String* 
+define internal void @Microsoft__Quantum__Diagnostics___93054858c55a4d069c8a89cbd644c719___QsRef3__FormattedFailure____body(i1 %actual, i1 %expected, %String* %message) {
+entry:
+  %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @27, i32 0, i32 0))
+  %1 = call %String* @__quantum__rt__string_concatenate(%String* %0, %String* %message)
+  %2 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %0)
+  call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1)
+  %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @28, i32 0, i32 0))
+  %4 = call %String* @__quantum__rt__string_concatenate(%String* %2, %String* %3)
+  call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1)
+  br i1 %expected, label %condTrue__1, label %condFalse__1
+
+condTrue__1: ; preds = %entry
+  %5 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @29, i32 0, i32 0))
+  br label %condContinue__1
+
+condFalse__1: ; preds = %entry
+  %6 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @30, i32 0, i32 0))
+  br label %condContinue__1
+
+condContinue__1: ; preds = %condFalse__1, %condTrue__1
+  %7 = phi %String* [ %5, %condTrue__1 ], [ %6, %condFalse__1 ]
+  %8 = call %String* @__quantum__rt__string_concatenate(%String* %4, %String* %7)
+  call void @__quantum__rt__string_update_reference_count(%String* %4, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1)
+  %9 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @31, i32 0, i32 0))
+  %10 = call %String* @__quantum__rt__string_concatenate(%String* %8, %String* %9)
+  call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1)
+  br i1 %actual, label %condTrue__2, label %condFalse__2
+
+condTrue__2: ; preds = %condContinue__1
+  %11 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @29, i32 0, i32 0))
+  br label %condContinue__2
+
+condFalse__2: ; preds = %condContinue__1
+  %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @30, i32 0, i32 0))
+  br label %condContinue__2
+
+condContinue__2: ; preds = %condFalse__2, %condTrue__2
+  %13 = phi %String* [ %11, %condTrue__2 ], [ %12, %condFalse__2 ]
+  %14 = call %String* @__quantum__rt__string_concatenate(%String* %10, %String* %13)
+  call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1)
+  call void @__quantum__rt__fail(%String* %14)
+  unreachable
+}
+
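+; Integer counterpart of the failure formatter above, rendering %expected and %actual
+; with __quantum__rt__int_to_string before failing.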
+define internal void @Microsoft__Quantum__Diagnostics___1cff4dbf452349c0aab5551517df2535___QsRef3__FormattedFailure____body(i64 %actual, i64 %expected, %String* %message) {
+entry:
+  %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @27, i32 0, i32 0))
+  %1 = call %String* @__quantum__rt__string_concatenate(%String* %0, %String* %message)
+  %2 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %0)
+  call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1)
+  %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @28, i32 0, i32 0))
+  %4 = call %String* @__quantum__rt__string_concatenate(%String* %2, %String* %3)
+  call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1)
+  %5 = call %String* @__quantum__rt__int_to_string(i64 %expected)
+  %6 = call %String* @__quantum__rt__string_concatenate(%String* %4, %String* %5)
+  call void @__quantum__rt__string_update_reference_count(%String* %4, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %5, i32 -1)
+  %7 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @31, i32 0, i32 0))
+  %8 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %7)
+  call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1)
+  %9 = call %String* @__quantum__rt__int_to_string(i64 %actual)
+  %10 = call %String* @__quantum__rt__string_concatenate(%String* %8, %String* %9)
+  call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1)
+  call void @__quantum__rt__fail(%String* %10)
+  unreachable
+}
+
+define internal %Array* @Microsoft__Quantum__Arrays__SequenceI__body(i64 %from, i64 %to) {
+entry:
+  %array = alloca %Array*, align 8
+  %0 = icmp sge i64 %to, %from
+  %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([32 x i8], [32 x i8]* @32, i32 0, i32 0))
+  call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %0, %String* %1)
+  %2 = sub i64 %to, %from
+  %n = add i64 %2, 1
+  %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %n)
+  %4 = sub i64 %n, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %5 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ]
+  %6 = icmp sle i64 %5, %4
+  br i1 %6, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 %5)
+  %8 = bitcast i8* %7 to i64*
+  store i64 0, i64* %8, align 4
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %9 = add i64 %5, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  store %Array* %3, %Array** %array, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1)
+  %10 = sub i64 %n, 1
+  br label %header__2
+
+header__2: ; preds = %exiting__2, %exit__1
+  %i = phi i64 [ 0, %exit__1 ], [ %17, %exiting__2 ]
+  %11 = icmp sle i64 %i, %10
+  br i1 %11, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+  %12 = load %Array*, %Array** %array, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 -1)
+  %13 = call %Array* @__quantum__rt__array_copy(%Array* %12, i1 false)
+  %14 = add i64 %from, %i
+  %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 %i)
+  %16 = bitcast i8* %15 to i64*
+  store i64 %14, i64* %16, align 4
+  call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1)
+  store %Array* %13, %Array** %array, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1)
+  br label %exiting__2
+
+exiting__2: ; preds = %body__2
+  %17 = add i64 %i, 1
+  br label %header__2
+
+exit__2: ; preds = %header__2
+  %18 = load %Array*, %Array** %array, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1)
+  ret %Array* %18
+}
+
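+; Monomorphized Mapped for Double -> (Double, Double) tuples (likely Q#'s Complex type):
+; invokes %mapper on element 0, seeds the output array with that first result, then
+; overwrites slots 1..length-1 in the copy-on-write loop below.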
+define internal %Array* @Microsoft__Quantum__Arrays___214772ffffeb49a1900df09d24a690d9_Mapped__body(%Callable* %mapper, %Array* %array) {
+entry:
+  %retval = alloca %Array*, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1)
+  %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array)
+  %0 = icmp eq i64 %length, 0
+  br i1 %0, label %then0__1, label %continue__1
+
+then0__1: ; preds = %entry
+  %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1)
+  ret %Array* %1
+
+continue__1: ; preds = %entry
+  %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0)
+  %3 = bitcast i8* %2 to double*
+  %4 = load double, double* %3, align 8
+  %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64))
+  %6 = bitcast %Tuple* %5 to { double }*
+  %7 = getelementptr inbounds { double }, { double }* %6, i32 0, i32 0
+  store double %4, double* %7, align 8
+  %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }* }* getelementptr ({ { double, double }* }, { { double, double }* }* null, i32 1) to i64))
+  call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %5, %Tuple* %8)
+  %9 = bitcast %Tuple* %8 to { { double, double }* }*
+  %10 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %9, i32 0, i32 0
+  %first = load { double, double }*, { double, double }** %10, align 8
+  %11 = bitcast { double, double }* %first to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1)
+  %12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length)
+  %13 = sub i64 %length, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %continue__1
+  %14 = phi i64 [ 0, %continue__1 ], [ %18, %exiting__1 ]
+  %15 = icmp sle i64 %14, %13
+  br i1 %15, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %14)
+  %17 = bitcast i8* %16 to { double, double }**
+  store { double, double }* %first, { double, double }** %17, align 8
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %18 = add i64 %14, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  store %Array* %12, %Array** %retval, align 8
+  %19 = sub i64 %length, 1
+  br label %header__2
+
+header__2: ; preds = %exiting__2, %exit__1
+  %20 = phi i64 [ 0, %exit__1 ], [ %26, %exiting__2 ]
+  %21 = icmp sle i64 %20, %19
+  br i1 %21, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+  %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %20)
+  %23 = bitcast i8* %22 to { double, double }**
+  %24 = load { double, double }*, { double, double }** %23, align 8
+  %25 = bitcast { double, double }* %24 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 1)
+  br label %exiting__2
+
+exiting__2: ; preds = %body__2
+  %26 = add i64 %20, 1
+  br label %header__2
+
+exit__2: ; preds = %header__2
+  call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1)
+  %27 = sub i64 %length, 1
+  br label %header__3
+
+header__3: ; preds = %exiting__3, %exit__2
+  %idx = phi i64 [ 1, %exit__2 ], [ %46, %exiting__3 ]
+  %28 = icmp sle i64 %idx, %27
+  br i1 %28, label %body__3, label %exit__3
+
+body__3: ; preds = %header__3
+  %29 = load %Array*, %Array** %retval, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 -1)
+  %30 = call %Array* @__quantum__rt__array_copy(%Array* %29, i1 false)
+  %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx)
+  %32 = bitcast i8* %31 to double*
+  %33 = load double, double* %32, align 8
+  %34 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64))
+  %35 = bitcast %Tuple* %34 to { double }*
+  %36 = getelementptr inbounds { double }, { double }* %35, i32 0, i32 0
+  store double %33, double* %36, align 8
+  %37 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }* }* getelementptr ({ { double, double }* }, { { double, double }* }* null, i32 1) to i64))
+  call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %34, %Tuple* %37)
+  %38 = bitcast %Tuple* %37 to { { double, double }* }*
+  %39 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %38, i32 0, i32 0
+  %40 = load { double, double }*, { double, double }** %39, align 8
+  %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 %idx)
+  %42 = bitcast i8* %41 to { double, double }**
+  %43 = bitcast { double, double }* %40 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 1)
+  %44 = load { double, double }*, { double, double }** %42, align 8
+  %45 = bitcast { double, double }* %44 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1)
+  store { double, double }* %40, { double, double }** %42, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %30, i32 1)
+  store %Array* %30, %Array** %retval, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1)
+  br label %exiting__3
+
+exiting__3: ; preds = %body__3
+  %46 = add i64 %idx, 1
+  br label %header__3
+
+exit__3: ; preds = %header__3
+  %47 = load %Array*, %Array** %retval, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1)
+  %48 = call i64 @__quantum__rt__array_get_size_1d(%Array* %47)
+  %49 = sub i64 %48, 1
+  br label %header__4
+
+header__4: ; preds = %exiting__4, %exit__3
+  %50 = phi i64 [ 0, %exit__3 ], [ %56, %exiting__4 ]
+  %51 = icmp sle i64 %50, %49
+  br i1 %51, label %body__4, label %exit__4
+
+body__4: ; preds = %header__4
+  %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %50)
+  %53 = bitcast i8* %52 to { double, double }**
+  %54 = load { double, double }*, { double, double }** %53, align 8
+  %55 = bitcast { double, double }* %54 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %55, i32 -1)
+  br label %exiting__4
+
+exiting__4: ; preds = %body__4
+  %56 = add i64 %50, 1
+  br label %header__4
+
+exit__4: ; preds = %header__4
+  call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1)
+  ret %Array* %47
+}
+
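+; Mapped specialization for Int -> Double, following the same seed-then-overwrite,
+; copy-on-write pattern as the tuple version above.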
+define internal %Array* @Microsoft__Quantum__Arrays___20d53ea8250b40bc86f0a0047df3f3c9_Mapped__body(%Callable* %mapper, %Array* %array) {
+entry:
+  %retval = alloca %Array*, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1)
+  %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array)
+  %0 = icmp eq i64 %length, 0
+  br i1 %0, label %then0__1, label %continue__1
+
+then0__1: ; preds = %entry
+  %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1)
+  ret %Array* %1
+
+continue__1: ; preds = %entry
+  %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0)
+  %3 = bitcast i8* %2 to i64*
+  %4 = load i64, i64* %3, align 4
+  %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64))
+  %6 = bitcast %Tuple* %5 to { i64 }*
+  %7 = getelementptr inbounds { i64 }, { i64 }* %6, i32 0, i32 0
+  store i64 %4, i64* %7, align 4
+  %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64))
+  call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %5, %Tuple* %8)
+  %9 = bitcast %Tuple* %8 to { double }*
+  %10 = getelementptr inbounds { double }, { double }* %9, i32 0, i32 0
+  %first = load double, double* %10, align 8
+  %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length)
+  %12 = sub i64 %length, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %continue__1
+  %13 = phi i64 [ 0, %continue__1 ], [ %17, %exiting__1 ]
+  %14 = icmp sle i64 %13, %12
+  br i1 %14, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %11, i64 %13)
+  %16 = bitcast i8* %15 to double*
+  store double %first, double* %16, align 8
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %17 = add i64 %13, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  store %Array* %11, %Array** %retval, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1)
+  %18 = sub i64 %length, 1
+  br label %header__2
+
+header__2: ; preds = %exiting__2, %exit__1
+  %idx = phi i64 [ 1, %exit__1 ], [ %34, %exiting__2 ]
+  %19 = icmp sle i64 %idx, %18
+  br i1 %19, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+  %20 = load %Array*, %Array** %retval, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1)
+  %21 = call %Array* @__quantum__rt__array_copy(%Array* %20, i1 false)
+  %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx)
+  %23 = bitcast i8* %22 to i64*
+  %24 = load i64, i64* %23, align 4
+  %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64))
+  %26 = bitcast %Tuple* %25 to { i64 }*
+  %27 = getelementptr inbounds { i64 }, { i64 }* %26, i32 0, i32 0
+  store i64 %24, i64* %27, align 4
+  %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64))
+  call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %25, %Tuple* %28)
+  %29 = bitcast %Tuple* %28 to { double }*
+  %30 = getelementptr inbounds { double }, { double }* %29, i32 0, i32 0
+  %31 = load double, double* %30, align 8
+  %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %idx)
+  %33 = bitcast i8* %32 to double*
+  store double %31, double* %33, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 1)
+  store %Array* %21, %Array** %retval, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1)
+  br label %exiting__2
+
+exiting__2: ; preds = %body__2
+  %34 = add i64 %idx, 1
+  br label %header__2
+
+exit__2: ; preds = %header__2
+  %35 = load %Array*, %Array** %retval, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %35, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1)
+  ret %Array* %35
+}
+
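+; IndexRange over an array of (Bool, Int) tuples: returns the range 0 .. 1 .. length - 1
+; while balancing alias counts on the array's elements.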
+define internal %Range @Microsoft__Quantum__Arrays___94a71e0233254bc7929a7d7210bcd75d_IndexRange__body(%Array* %array) {
+entry:
+  %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array)
+  %1 = sub i64 %0, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ]
+  %3 = icmp sle i64 %2, %1
+  br i1 %3, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2)
+  %5 = bitcast i8* %4 to { i1, i64 }**
+  %6 = load { i1, i64 }*, { i1, i64 }** %5, align 8
+  %7 = bitcast { i1, i64 }* %6 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %8 = add i64 %2, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1)
+  %9 = sub i64 %0, 1
+  %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2
+  %11 = sub i64 %0, 1
+  br label %header__2
+
+header__2: ; preds = %exiting__2, %exit__1
+  %12 = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ]
+  %13 = icmp sle i64 %12, %11
+  br i1 %13, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+  %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %12)
+  %15 = bitcast i8* %14 to { i1, i64 }**
+  %16 = load { i1, i64 }*, { i1, i64 }** %15, align 8
+  %17 = bitcast { i1, i64 }* %16 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 -1)
+  br label %exiting__2
+
+exiting__2: ; preds = %body__2
+  %18 = add i64 %12, 1
+  br label %header__2
+
+exit__2: ; preds = %header__2
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1)
+  ret %Range %10
+}
+
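+; Subarray: builds a new array containing array[indices[0]], array[indices[1]], ...,
+; using copy-on-write updates and the usual alias/reference bookkeeping.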
+define internal %Array* @Microsoft__Quantum__Arrays___350337fdf0114f61a4061047b90dbf85_Subarray__body(%Array* %indices, %Array* %array) {
+entry:
+  %sliced = alloca %Array*, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 1)
+  %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array)
+  %1 = sub i64 %0, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ]
+  %3 = icmp sle i64 %2, %1
+  br i1 %3, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2)
+  %5 = bitcast i8* %4 to { i1, i64 }**
+  %6 = load { i1, i64 }*, { i1, i64 }** %5, align 8
+  %7 = bitcast { i1, i64 }* %6 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %8 = add i64 %2, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1)
+  %nSliced = call i64 @__quantum__rt__array_get_size_1d(%Array* %indices)
+  %9 = icmp eq i64 %nSliced, 0
+  br i1 %9, label %then0__1, label %continue__1
+
+then0__1: ; preds = %exit__1
+  %10 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0)
+  call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1)
+  %11 = sub i64 %0, 1
+  br label %header__2
+
+continue__1: ; preds = %exit__1
+  %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0)
+  %13 = bitcast i8* %12 to i64*
+  %14 = load i64, i64* %13, align 4
+  %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %14)
+  %16 = bitcast i8* %15 to { i1, i64 }**
+  %17 = load { i1, i64 }*, { i1, i64 }** %16, align 8
+  %18 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nSliced)
+  %19 = sub i64 %nSliced, 1
+  br label %header__3
+
+header__2: ; preds = %exiting__2, %then0__1
+  %20 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__2 ]
+  %21 = icmp sle i64 %20, %11
+  br i1 %21, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+  %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %20)
+  %23 = bitcast i8* %22 to { i1, i64 }**
+  %24 = load { i1, i64 }*, { i1, i64 }** %23, align 8
+  %25 = bitcast { i1, i64 }* %24 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1)
+  br label %exiting__2
+
+exiting__2: ; preds = %body__2
+  %26 = add i64 %20, 1
+  br label %header__2
+
+exit__2: ; preds = %header__2
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1)
+  ret %Array* %10
+
+header__3: ; preds = %exiting__3, %continue__1
+  %27 = phi i64 [ 0, %continue__1 ], [ %32, %exiting__3 ]
+  %28 = icmp sle i64 %27, %19
+  br i1 %28, label %body__3, label %exit__3
+
+body__3: ; preds = %header__3
+  %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %27)
+  %30 = bitcast i8* %29 to { i1, i64 }**
+  store { i1, i64 }* %17, { i1, i64 }** %30, align 8
+  %31 = bitcast { i1, i64 }* %17 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 1)
+  br label %exiting__3
+
+exiting__3: ; preds = %body__3
+  %32 = add i64 %27, 1
+  br label %header__3
+
+exit__3: ; preds = %header__3
+  store %Array* %18, %Array** %sliced, align 8
+  %33 = sub i64 %nSliced, 1
+  br label %header__4
+
+header__4: ; preds = %exiting__4, %exit__3
+  %34 = phi i64 [ 0, %exit__3 ], [ %40, %exiting__4 ]
+  %35 = icmp sle i64 %34, %33
+  br i1 %35, label %body__4, label %exit__4
+
+body__4: ; preds = %header__4
+  %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %34)
+  %37 = bitcast i8* %36 to { i1, i64 }**
+  %38 = load { i1, i64 }*, { i1, i64 }** %37, align 8
+  %39 = bitcast { i1, i64 }* %38 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %39, i32 1)
+  br label %exiting__4
+
+exiting__4: ; preds = %body__4
+  %40 = add i64 %34, 1
+  br label %header__4
+
+exit__4: ; preds = %header__4
+  call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1)
+  %41 = sub i64 %nSliced, 1
+  br label %header__5
+
+header__5: ; preds = %exiting__5, %exit__4
+  %idx = phi i64 [ 1, %exit__4 ], [ %56, %exiting__5 ]
+  %42 = icmp sle i64 %idx, %41
+  br i1 %42, label %body__5, label %exit__5
+
+body__5: ; preds = %header__5
+  %43 = load %Array*, %Array** %sliced, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 -1)
+  %44 = call %Array* @__quantum__rt__array_copy(%Array* %43, i1 false)
+  %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 %idx)
+  %46 = bitcast i8* %45 to i64*
+  %47 = load i64, i64* %46, align 4
+  %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %47)
+  %49 = bitcast i8* %48 to { i1, i64 }**
+  %50 = load { i1, i64 }*, { i1, i64 }** %49, align 8
+  %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 %idx)
+  %52 = bitcast i8* %51 to { i1, i64 }**
+  %53 = bitcast { i1, i64 }* %50 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %53, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 1)
+  %54 = load { i1, i64 }*, { i1, i64 }** %52, align 8
+  %55 = bitcast { i1, i64 }* %54 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %55, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %55, i32 -1)
+  store { i1, i64 }* %50, { i1, i64 }** %52, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 1)
+  store %Array* %44, %Array** %sliced, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %43, i32 -1)
+  br label %exiting__5
+
+exiting__5: ; preds = %body__5
+  %56 = add i64 %idx, 1
+  br label %header__5
+
+exit__5: ; preds = %header__5
+  %57 = load %Array*, %Array** %sliced, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1)
+  %58 = sub i64 %0, 1
+  br label %header__6
+
+header__6: ; preds = %exiting__6, %exit__5
+  %59 = phi i64 [ 0, %exit__5 ], [ %65, %exiting__6 ]
+  %60 = icmp sle i64 %59, %58
+  br i1 %60, label %body__6, label %exit__6
+
+body__6: ; preds = %header__6
+  %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %59)
+  %62 = bitcast i8* %61 to { i1, i64 }**
+  %63 = load { i1, i64 }*, { i1, i64 }** %62, align 8
+  %64 = bitcast { i1, i64 }* %63 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %64, i32 -1)
+  br label %exiting__6
+
+exiting__6: ; preds = %body__6
+  %65 = add i64 %59, 1
+  br label %header__6
+
+exit__6: ; preds = %header__6
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1)
+  %66 = call i64 @__quantum__rt__array_get_size_1d(%Array* %57)
+  %67 = sub i64 %66, 1
+  br label %header__7
+
+header__7: ; preds = %exiting__7, %exit__6
+  %68 = phi i64 [ 0, %exit__6 ], [ %74, %exiting__7 ]
+  %69 = icmp sle i64 %68, %67
+  br i1 %69, label %body__7, label %exit__7
+
+body__7: ; preds = %header__7
+  %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %57, i64 %68)
+  %71 = bitcast i8* %70 to { i1, i64 }**
+  %72 = load { i1, i64 }*, { i1, i64 }** %71, align 8
+  %73 = bitcast { i1, i64 }* %72 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %73, i32 -1)
+  br label %exiting__7
+
+exiting__7: ; preds = %body__7
+  %74 = add i64 %68, 1
+  br label %header__7
+
+exit__7: ; preds = %header__7
+  call void @__quantum__rt__array_update_alias_count(%Array* %57, i32 -1)
+  ret %Array* %57
+}
+
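+; ElementAt over %Callable* elements: checks 0 <= %index < length (Fact with message @34)
+; and returns the element with its capture/callable reference counts bumped.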
+define internal %Callable* @Microsoft__Quantum__Arrays___7d83b54afca94675b63617b69b56aa7a_ElementAt__body(i64 %index, %Array* %array) {
+entry:
+  %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array)
+  %1 = sub i64 %0, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ]
+  %3 = icmp sle i64 %2, %1
+  br i1 %3, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2)
+  %5 = bitcast i8* %4 to %Callable**
+  %6 = load %Callable*, %Callable** %5, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %7 = add i64 %2, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1)
+  %8 = icmp sge i64 %index, 0
+  br i1 %8, label %condTrue__1, label %condContinue__1
+
+condTrue__1: ; preds = %exit__1
+  %9 = icmp slt i64 %index, %0
+  br label %condContinue__1
+
+condContinue__1: ; preds = %condTrue__1, %exit__1
+  %10 = phi i1 [ %9, %condTrue__1 ], [ %8, %exit__1 ]
+  %11 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @34, i32 0, i32 0))
+  call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %10, %String* %11)
+  %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %index)
+  %13 = bitcast i8* %12 to %Callable**
+  %14 = load %Callable*, %Callable** %13, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 1)
+  %15 = sub i64 %0, 1
+  br label %header__2
+
+header__2: ; preds = %exiting__2, %condContinue__1
+  %16 = phi i64 [ 0, %condContinue__1 ], [ %21, %exiting__2 ]
+  %17 = icmp sle i64 %16, %15
+  br i1 %17, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+  %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %16)
+  %19 = bitcast i8* %18 to %Callable**
+  %20 = load %Callable*, %Callable** %19, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %20, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %20, i32 -1)
+  br label %exiting__2
+
+exiting__2: ; preds = %body__2
+  %21 = add i64 %16, 1
+  br label %header__2
+
+exit__2: ; preds = %header__2
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1)
+  ret %Callable* %14
+}
+
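+; ElementAt over nested tuple elements: the same bounds check, with alias/reference
+; updates applied to every level of the ((Array, Array), Array) structure.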
+define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Arrays___b20df4913ab0459888bcf1448be084b3_ElementAt__body(i64 %index, %Array* %array) {
+entry:
+  %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array)
+  %1 = sub i64 %0, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %2 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ]
+  %3 = icmp sle i64 %2, %1
+  br i1 %3, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2)
+  %5 = bitcast i8* %4 to { { %Array*, %Array* }*, %Array* }**
+  %6 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %5, align 8
+  %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %6, i32 0, i32 0
+  %8 = load { %Array*, %Array* }*, { %Array*, %Array* }** %7, align 8
+  %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0
+  %10 = load %Array*, %Array** %9, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1)
+  %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1
+  %12 = load %Array*, %Array** %11, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1)
+  %13 = bitcast { %Array*, %Array* }* %8 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %13, i32 1)
+  %14 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %6, i32 0, i32 1
+  %15 = load %Array*, %Array** %14, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1)
+  %16 = bitcast { { %Array*, %Array* }*, %Array* }* %6 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %17 = add i64 %2, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1)
+  %18 = icmp sge i64 %index, 0
+  br i1 %18, label %condTrue__1, label %condContinue__1
+
+condTrue__1: ; preds = %exit__1
+  %19 = icmp slt i64 %index, %0
+  br label %condContinue__1
+
+condContinue__1: ; preds = %condTrue__1, %exit__1
+  %20 = phi i1 [ %19, %condTrue__1 ], [ %18, %exit__1 ]
+  %21 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @34, i32 0, i32 0))
+  call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %20, %String* %21)
+  %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %index)
+  %23 = bitcast i8* %22 to { { %Array*, %Array* }*, %Array* }**
+  %24 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %23, align 8
+  %25 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %24, i32 0, i32 0
+  %26 = load { %Array*, %Array* }*, { %Array*, %Array* }** %25, align 8
+  %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %26, i32 0, i32 0
+  %28 = load %Array*, %Array** %27, align 8
+  %29 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %26, i32 0, i32 1
+  %30 = load %Array*, %Array** %29, align 8
+  %31 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %24, i32 0, i32 1
+  %32 = load %Array*, %Array** %31, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %30, i32 1)
+  %33 = bitcast { %Array*, %Array* }* %26 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 1)
+  %34 = bitcast { { %Array*, %Array* }*, %Array* }* %24 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 1)
+  %35 = sub i64 %0, 1
+  br label %header__2
+
+header__2: ; preds = %exiting__2, %condContinue__1
+  %36 = phi i64 [ 0, %condContinue__1 ], [ %51, %exiting__2 ]
+  %37 = icmp sle i64 %36, %35
+  br i1 %37, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+  %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %36)
+  %39 = bitcast i8* %38 to { { %Array*, %Array* }*, %Array* }**
+  %40 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %39, align 8
+  %41 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %40, i32 0, i32 0
+  %42 = load { %Array*, %Array* }*, { %Array*, %Array* }** %41, align 8
+  %43 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %42, i32 0, i32 0
+  %44 = load %Array*, %Array** %43, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 -1)
+  %45 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %42, i32 0, i32 1
+  %46 = load %Array*, %Array** %45, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %46, i32 -1)
+  %47 = bitcast { %Array*, %Array* }* %42 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1)
+  %48 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %40, i32 0, i32 1
+  %49 = load %Array*, %Array** %48, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1)
+  %50 = bitcast { { %Array*, %Array* }*, %Array* }* %40 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %50, i32 -1)
+  br label %exiting__2
+
+exiting__2: ; preds = %body__2
+  %51 = add i64 %36, 1
+  br label %header__2
+
+exit__2: ; preds = %header__2
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %21, i32 -1)
+  ret { { %Array*, %Array* }*, %Array* }* %24
+}
+
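+; ConstantArray for Double: allocates a length-%length array and stores %value into every slot.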
+define internal %Array* @Microsoft__Quantum__Arrays___aadf45b9686643c385c8db16f19e226a_ConstantArray__body(i64 %length, double %value) {
+entry:
+  %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length)
+  %1 = sub i64 %length, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ]
+  %3 = icmp sle i64 %2, %1
+  br i1 %3, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2)
+  %5 = bitcast i8* %4 to double*
+  store double %value, double* %5, align 8
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %6 = add i64 %2, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  ret %Array* %0
+}
+
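+; ConstantArray for (Double, Double) tuples: shares one tuple pointer across all slots,
+; incrementing its reference count once per copy.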
+define internal %Array* @Microsoft__Quantum__Arrays___905ac1230d4942abb41de7c18651b8a2_ConstantArray__body(i64 %length, { double, double }* %value) {
+entry:
+  %0 = bitcast { double, double }* %value to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1)
+  %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length)
+  %2 = sub i64 %length, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %3 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ]
+  %4 = icmp sle i64 %3, %2
+  br i1 %4, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %3)
+  %6 = bitcast i8* %5 to { double, double }**
+  store { double, double }* %value, { double, double }** %6, align 8
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 1)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %7 = add i64 %3, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1)
+  ret %Array* %1
+}
+
+define internal void @Lifted__PartialApplication__51__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { i64 }*
+  %1 = getelementptr inbounds { i64 }, { i64 }* %0, i32 0, i32 0
+  %2 = load i64, i64* %1, align 4
+  %3 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }*
+  %4 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %3, i32 0, i32 1
+  %5 = load %Array*, %Array** %4, align 8
+  %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64))
+  %7 = bitcast %Tuple* %6 to { i64, %Array* }*
+  %8 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %7, i32 0, i32 0
+  %9 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %7, i32 0, i32 1
+  store i64 %2, i64* %8, align 4
+  store %Array* %5, %Array** %9, align 8
+  %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %3, i32 0, i32 0
+  %11 = load %Callable*, %Callable** %10, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Arrays___7d83b54afca94675b63617b69b56aa7a_ElementAt__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { i64, %Array* }*
+  %1 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %0, i32 0, i32 1
+  %3 = load i64, i64* %1, align 4
+  %4 = load %Array*, %Array** %2, align 8
+  %5 = call %Callable* @Microsoft__Quantum__Arrays___7d83b54afca94675b63617b69b56aa7a_ElementAt__body(i64 %3, %Array* %4)
+  %6 = bitcast %Tuple* %result-tuple to { %Callable* }*
+  %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0
+  store %Callable* %5, %Callable** %7, align 8
+  ret void
+}
+
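+; MemoryManagement__27 pair: applies a reference- or alias-count delta to a
+; { %Callable*, %Array* } capture tuple and to each callable stored in the captured array.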
+define internal void @MemoryManagement__27__RefCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }*
+  %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0
+  %2 = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change)
+  %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1
+  %4 = load %Array*, %Array** %3, align 8
+  %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4)
+  %6 = sub i64 %5, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ]
+  %8 = icmp sle i64 %7, %6
+  br i1 %8, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7)
+  %10 = bitcast i8* %9 to %Callable**
+  %11 = load %Callable*, %Callable** %10, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 %count-change)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 %count-change)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %12 = add i64 %7, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change)
+  ret void
+}
+
+define internal void @MemoryManagement__27__AliasCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }*
+  %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0
+  %2 = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change)
+  %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1
+  %4 = load %Array*, %Array** %3, align 8
+  %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4)
+  %6 = sub i64 %5, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ]
+  %8 = icmp sle i64 %7, %6
+  br i1 %8, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7)
+  %10 = bitcast i8* %9 to %Callable**
+  %11 = load %Callable*, %Callable** %10, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %11, i32 %count-change)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %11, i32 %count-change)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %12 = add i64 %7, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__52__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { i64 }*
+  %1 = getelementptr inbounds { i64 }, { i64 }* %0, i32 0, i32 0
+  %2 = load i64, i64* %1, align 4
+  %3 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }*
+  %4 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %3, i32 0, i32 1
+  %5 = load %Array*, %Array** %4, align 8
+  %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64))
+  %7 = bitcast %Tuple* %6 to { i64, %Array* }*
+  %8 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %7, i32 0, i32 0
+  %9 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %7, i32 0, i32 1
+  store i64 %2, i64* %8, align 4
+  store %Array* %5, %Array** %9, align 8
+  %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %3, i32 0, i32 0
+  %11 = load %Callable*, %Callable** %10, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1)
+  ret void
+}
+
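+; Wrapper exposing the tuple-returning ElementAt through the %Callable* calling convention.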
+define internal void @Microsoft__Quantum__Arrays___b20df4913ab0459888bcf1448be084b3_ElementAt__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { i64, %Array* }*
+  %1 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %0, i32 0, i32 1
+  %3 = load i64, i64* %1, align 4
+  %4 = load %Array*, %Array** %2, align 8
+  %5 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Arrays___b20df4913ab0459888bcf1448be084b3_ElementAt__body(i64 %3, %Array* %4)
+  %6 = bitcast %Tuple* %result-tuple to { { { %Array*, %Array* }*, %Array* }* }*
+  %7 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %6, i32 0, i32 0
+  store { { %Array*, %Array* }*, %Array* }* %5, { { %Array*, %Array* }*, %Array* }** %7, align 8
+  ret void
+}
+
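+; MemoryManagement__28 pair: like MemoryManagement__27, but the captured array holds
+; nested ((Array, Array), Array) tuples, so every nested member is counted as well.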
+define internal void @MemoryManagement__28__RefCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }*
+  %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0
+  %2 = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change)
+  %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1
+  %4 = load %Array*, %Array** %3, align 8
+  %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4)
+  %6 = sub i64 %5, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %7 = phi i64 [ 0, %entry ], [ %22, %exiting__1 ]
+  %8 = icmp sle i64 %7, %6
+  br i1 %8, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7)
+  %10 = bitcast i8* %9 to { { %Array*, %Array* }*, %Array* }**
+  %11 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %10, align 8
+  %12 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %11, i32 0, i32 0
+  %13 = load { %Array*, %Array* }*, { %Array*, %Array* }** %12, align 8
+  %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 0
+  %15 = load %Array*, %Array** %14, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 %count-change)
+  %16 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 1
+  %17 = load %Array*, %Array** %16, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 %count-change)
+  %18 = bitcast { %Array*, %Array* }* %13 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 %count-change)
+  %19 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %11, i32 0, i32 1
+  %20 = load %Array*, %Array** %19, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 %count-change)
+  %21 = bitcast { { %Array*, %Array* }*, %Array* }* %11 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 %count-change)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %22 = add i64 %7, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change)
+  ret void
+}
+
+define internal void @MemoryManagement__28__AliasCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }*
+  %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0
+  %2 = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change)
+  %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1
+  %4 = load %Array*, %Array** %3, align 8
+  %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4)
+  %6 = sub i64 %5, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %7 = phi i64 [ 0, %entry ], [ %22, %exiting__1 ]
+  %8 = icmp sle i64 %7, %6
+  br i1 %8, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7)
+  %10 = bitcast i8* %9 to { { %Array*, %Array* }*, %Array* }**
+  %11 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %10, align 8
+  %12 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %11, i32 0, i32 0
+  %13 = load { %Array*, %Array* }*, { %Array*, %Array* }** %12, align 8
+  %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 0
+  %15 = load %Array*, %Array** %14, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 %count-change)
+  %16 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 1
+  %17 = load %Array*, %Array** %16, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 %count-change)
+  %18 = bitcast { %Array*, %Array* }* %13 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 %count-change)
+  %19 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %11, i32 0, i32 1
+  %20 = load %Array*, %Array** %19, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 %count-change)
+  %21 = bitcast { { %Array*, %Array* }*, %Array* }* %11 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 %count-change)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %22 = add i64 %7, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change)
+  ret void
+}
+
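+; Same IndexRange shape as the (Bool, Int) version above, specialized to
+; (Int, Callable) tuple elements.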
+define internal %Range @Microsoft__Quantum__Arrays___7f13ac56d0e24dbf8910091cb8e6667a_IndexRange__body(%Array* %array) {
+entry:
+  %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array)
+  %1 = sub i64 %0, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %2 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ]
+  %3 = icmp sle i64 %2, %1
+  br i1 %3, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2)
+  %5 = bitcast i8* %4 to { i64, %Callable* }**
+  %6 = load { i64, %Callable* }*, { i64, %Callable* }** %5, align 8
+  %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1
+  %8 = load %Callable*, %Callable** %7, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1)
+  %9 = bitcast { i64, %Callable* }* %6 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %10 = add i64 %2, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1)
+  %11 = sub i64 %0, 1
+  %12 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %11, 2
+  %13 = sub i64 %0, 1
+  br label %header__2
+
+header__2: ; preds = %exiting__2, %exit__1
+  %14 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ]
+  %15 = icmp sle i64 %14, %13
+  br i1 %15, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+  %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %14)
+  %17 = bitcast i8* %16 to { i64, %Callable* }**
+  %18 = load { i64, %Callable* }*, { i64, %Callable* }** %17, align 8
+  %19 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %18, i32 0, i32 1
+  %20 = load %Callable*, %Callable** %19, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %20, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %20, i32 -1)
+  %21 = bitcast { i64, %Callable* }* %18 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1)
+  br label %exiting__2
+
+exiting__2: ; preds = %body__2
+  %22 = add i64 %14, 1
+  br label %header__2
+
+exit__2: ; preds = %header__2
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1)
+  ret %Range %12
+}
+
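+; Fold over a (Int, Callable) state: threads %current through %folder for each element,
+; driving the loop with the IndexRange function above and rebalancing counts per step.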
+define internal { i64, %Callable* }* @Microsoft__Quantum__Arrays___a0f596cedd8444258509c1c2bf5316bc_Fold__body(%Callable* %folder, { i64, %Callable* }* %state, %Array* %array) {
+entry:
+  %current = alloca { i64, %Callable* }*, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %folder, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %folder, i32 1)
+  %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %state, i32 0, i32 1
+  %1 = load %Callable*, %Callable** %0, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1)
+  %2 = bitcast { i64, %Callable* }* %state to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1)
+  %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array)
+  %4 = sub i64 %3, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %5 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ]
+  %6 = icmp sle i64 %5, %4
+  br i1 %6, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %5)
+  %8 = bitcast i8* %7 to { i64, %Callable* }**
+  %9 = load { i64, %Callable* }*, { i64, %Callable* }** %8, align 8
+  %10 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %9, i32 0, i32 1
+  %11 = load %Callable*, %Callable** %10, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %11, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %11, i32 1)
+  %12 = bitcast { i64, %Callable* }* %9 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1)
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %13 = add i64 %5, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1)
+  store { i64, %Callable* }* %state, { i64, %Callable* }** %current, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1)
+  %14 = call %Range @Microsoft__Quantum__Arrays___7f13ac56d0e24dbf8910091cb8e6667a_IndexRange__body(%Array* %array)
+  %15 = extractvalue %Range %14, 0
+  %16 = extractvalue %Range %14, 1
+  %17 = extractvalue %Range %14, 2
+  br label %preheader__1
+
+preheader__1: ; preds = %exit__1
+  %18 = icmp sgt i64 %16, 0
+  br label %header__2
+
+header__2: ; preds = %exiting__2, %preheader__1
+  %idxElement = phi i64 [ %15, %preheader__1 ], [ %43, %exiting__2 ]
+  %19 = icmp sle i64 %idxElement, %17
+  %20 = icmp sge i64 %idxElement, %17
+  %21 = select i1 %18, i1 %19, i1 %20
+  br i1 %21, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+  %22 = load { i64, %Callable* }*, { i64, %Callable* }** %current, align 8
+  %23 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %22, i32 0, i32 1
+  %24 = load %Callable*, %Callable** %23, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 1)
+  %25 = bitcast { i64, %Callable* }* %22 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 1)
+  %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idxElement)
+  %27 = bitcast i8* %26 to { i64, %Callable* }**
+  %28 = load { i64, %Callable* }*, { i64, %Callable* }** %27, align 8
+  %29 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %28, i32 0, i32 1
+  %30 = load %Callable*, %Callable** %29, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 1)
+  %31 = bitcast { i64, %Callable* }* %28 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 1)
+  %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { i64, %Callable* }* }* getelementptr ({ { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, %Callable* }*, { i64, %Callable* }* }* null, i32 1) to i64))
+  %33 = bitcast %Tuple* %32 to { { i64, %Callable* }*, { i64, %Callable* }* }*
+  %34 = getelementptr inbounds { { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, %Callable* }*, { i64, %Callable* }* }* %33, i32 0, i32 0
+  %35 = getelementptr inbounds { { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, %Callable* }*, { i64, %Callable* }* }* %33, i32 0, i32 1
+  store { i64, %Callable* }* %22, { i64, %Callable* }** %34, align 8
+  store { i64, %Callable* }* %28, { i64, %Callable* }** %35, align 8
+  %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }* }* getelementptr ({ { i64, %Callable* }* }, { { i64, %Callable* }* }* null, i32 1) to i64))
+  call void @__quantum__rt__callable_invoke(%Callable* %folder, %Tuple* %32, %Tuple* %36)
+  %37 = bitcast %Tuple* %36 to { { i64, %Callable* }* }*
i64, %Callable* }* }, { { i64, %Callable* }* }* %37, i32 0, i32 0 + %39 = load { i64, %Callable* }*, { i64, %Callable* }** %38, align 8 + %40 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %39, i32 0, i32 1 + %41 = load %Callable*, %Callable** %40, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %41, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %41, i32 1) + %42 = bitcast { i64, %Callable* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %41, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %41, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %24, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + store { i64, %Callable* }* %39, { i64, %Callable* }** %current, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %41, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %41, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %43 = add i64 %idxElement, %16 + br label %header__2 + +exit__2: ; preds = %header__2 + %44 = load { i64, %Callable* }*, { i64, %Callable* }** %current, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %folder, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %folder, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + %45 = sub i64 %3, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %46 = phi i64 [ 0, %exit__2 ], [ %54, %exiting__3 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %46) + %49 = bitcast i8* %48 to { i64, %Callable* }** + %50 = load { i64, %Callable* }*, { i64, %Callable* }** %49, align 8 + %51 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %50, i32 0, i32 1 + %52 = load %Callable*, %Callable** %51, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %52, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %52, i32 -1) + %53 = bitcast { i64, %Callable* }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %53, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %54 = add i64 %46, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + %55 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %44, i32 0, i32 1 + %56 = load %Callable*, %Callable** %55, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %56, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %56, i32 -1) + %57 = bitcast { i64, %Callable* }* %44 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + ret { i64, %Callable* }* %44 +} + +define internal %Array* @Microsoft__Quantum__Arrays___4adfaffa1d224736a6c92f5abc9f739b_Padded__body(i64 %nElementsTotal, { double, double }* %defaultElement, %Array* %inputArray) { +entry: + %0 = bitcast { double, double }* %defaultElement to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %nElementsInitial = call i64 @__quantum__rt__array_get_size_1d(%Array* %inputArray) + %1 = sub i64 %nElementsInitial, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %inputArray, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 1) + %nAbsElementsTotal = call i64 @Microsoft__Quantum__Math__AbsI__body(i64 %nElementsTotal) + %9 = icmp sge i64 %nAbsElementsTotal, %nElementsInitial + %10 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([71 x i8], [71 x i8]* @35, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %9, i1 true, %String* %10) + %nElementsPad = sub i64 %nAbsElementsTotal, %nElementsInitial + %padArray = call %Array* @Microsoft__Quantum__Arrays___905ac1230d4942abb41de7c18651b8a2_ConstantArray__body(i64 %nElementsPad, { double, double }* %defaultElement) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %padArray) + %12 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %padArray, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 1) + %20 = icmp sge i64 
%nElementsTotal, 0 + br i1 %20, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__2 + %21 = call %Array* @__quantum__rt__array_concatenate(%Array* %padArray, %Array* %inputArray) + %22 = call i64 @__quantum__rt__array_get_size_1d(%Array* %21) + %23 = sub i64 %22, 1 + br label %header__3 + +condFalse__1: ; preds = %exit__2 + %24 = call %Array* @__quantum__rt__array_concatenate(%Array* %inputArray, %Array* %padArray) + %25 = call i64 @__quantum__rt__array_get_size_1d(%Array* %24) + %26 = sub i64 %25, 1 + br label %header__4 + +condContinue__1: ; preds = %exit__4, %exit__3 + %27 = phi %Array* [ %21, %exit__3 ], [ %24, %exit__4 ] + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + %28 = sub i64 %nElementsInitial, 1 + br label %header__5 + +header__3: ; preds = %exiting__3, %condTrue__1 + %29 = phi i64 [ 0, %condTrue__1 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %23 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) + br label %condContinue__1 + +header__4: ; preds = %exiting__4, %condFalse__1 + %36 = phi i64 [ 0, %condFalse__1 ], [ %42, %exiting__4 ] + %37 = icmp sle i64 %36, %26 + br i1 %37, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %24, i64 %36) + %39 = bitcast i8* %38 to { double, double }** + %40 = load { double, double }*, { double, double }** %39, align 8 + %41 = bitcast { double, double }* %40 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %42 = add i64 %36, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 -1) + br label %condContinue__1 + +header__5: ; preds = %exiting__5, %condContinue__1 + %43 = phi i64 [ 0, %condContinue__1 ], [ %49, %exiting__5 ] + %44 = icmp sle i64 %43, %28 + br i1 %44, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %inputArray, i64 %43) + %46 = bitcast i8* %45 to { double, double }** + %47 = load { double, double }*, { double, double }** %46, align 8 + %48 = bitcast { double, double }* %47 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %48, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %49 = add i64 %43, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 -1) + %50 = sub i64 %11, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %51 = phi i64 [ 0, %exit__5 ], [ %57, %exiting__6 ] + %52 = icmp sle i64 %51, %50 + br i1 %52, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %53 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %padArray, i64 %51) + %54 = bitcast i8* %53 to { double, double }** + %55 = load { double, double }*, { double, double }** %54, align 8 + %56 = bitcast { double, double }* %55 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %57 = add i64 %51, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + %58 = sub i64 %11, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %59 = phi i64 [ 0, %exit__6 ], [ %65, %exiting__7 ] + %60 = icmp sle i64 %59, %58 + br i1 %60, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %padArray, i64 %59) + %62 = bitcast i8* %61 to { double, double }** + %63 = load { double, double }*, { double, double }** %62, align 8 + %64 = bitcast { double, double }* %63 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %64, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %65 = add i64 %59, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %padArray, i32 -1) + ret %Array* %27 +} + +define internal void @Microsoft__Quantum__Characterization__DiscretePhaseEstimationIteration__body({ %Callable* }* %oracle, i64 %power, double %theta, %Array* %targetState, %Qubit* %controlQubit) { +entry: + %0 = getelementptr inbounds { %Callable* }, { %Callable* }* %oracle, i32 0, i32 0 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { %Callable* }* %oracle to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetState, i32 1) + %3 = fneg double %theta + %4 = sitofp i64 %power to double + %inversionAngle = fmul double %3, %4 + call void @__quantum__qis__h__body(%Qubit* %controlQubit) + call void @Microsoft__Quantum__Intrinsic__Rz__body(double %inversionAngle, %Qubit* %controlQubit) + %5 = call %Callable* @__quantum__rt__callable_copy(%Callable* %1, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %5, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %5) + %6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 0) + %8 = bitcast i8* %7 to %Qubit** + store %Qubit* %controlQubit, %Qubit** %8, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %targetState, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { i64, %Array* }* + %11 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %10, i32 0, i32 1 + store i64 %power, i64* %11, align 4 + store %Array* %targetState, %Array** %12, align 8 + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Array* }* }* getelementptr ({ %Array*, { i64, %Array* }* }, { %Array*, { i64, 
%Array* }* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Array*, { i64, %Array* }* }* + %15 = getelementptr inbounds { %Array*, { i64, %Array* }* }, { %Array*, { i64, %Array* }* }* %14, i32 0, i32 0 + %16 = getelementptr inbounds { %Array*, { i64, %Array* }* }, { %Array*, { i64, %Array* }* }* %14, i32 0, i32 1 + store %Array* %6, %Array** %15, align 8 + store { i64, %Array* }* %10, { i64, %Array* }** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %5, %Tuple* %13, %Tuple* null) + call void @__quantum__qis__h__body(%Qubit* %controlQubit) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetState, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %5, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %5, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetState, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Characterization__DiscretePhaseEstimationIteration__adj({ %Callable* }* %oracle, i64 %power, double %theta, %Array* %targetState, %Qubit* %controlQubit) { +entry: + %0 = getelementptr inbounds { %Callable* }, { %Callable* }* %oracle, i32 0, i32 0 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { %Callable* }* %oracle to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetState, i32 1) + %3 = fneg double %theta + %4 = sitofp i64 %power to double + %__qsVar0__inversionAngle__ = fmul double %3, %4 + call void @__quantum__qis__h__body(%Qubit* %controlQubit) + %5 = call %Callable* @__quantum__rt__callable_copy(%Callable* %1, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %5, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %5) + call void @__quantum__rt__callable_make_adjoint(%Callable* %5) + %6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 0) + %8 = bitcast i8* %7 to %Qubit** + store %Qubit* %controlQubit, %Qubit** %8, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %targetState, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { i64, %Array* }* + %11 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %10, i32 0, i32 1 + store i64 %power, i64* %11, align 4 + store %Array* %targetState, %Array** %12, align 8 + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Array* }* }* getelementptr ({ %Array*, { i64, %Array* }* }, { %Array*, { i64, %Array* }* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to 
{ %Array*, { i64, %Array* }* }* + %15 = getelementptr inbounds { %Array*, { i64, %Array* }* }, { %Array*, { i64, %Array* }* }* %14, i32 0, i32 0 + %16 = getelementptr inbounds { %Array*, { i64, %Array* }* }, { %Array*, { i64, %Array* }* }* %14, i32 0, i32 1 + store %Array* %6, %Array** %15, align 8 + store { i64, %Array* }* %10, { i64, %Array* }** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %5, %Tuple* %13, %Tuple* null) + call void @Microsoft__Quantum__Intrinsic__Rz__adj(double %__qsVar0__inversionAngle__, %Qubit* %controlQubit) + call void @__quantum__qis__h__body(%Qubit* %controlQubit) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetState, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %5, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %5, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetState, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Characterization__DiscretePhaseEstimationIteration__ctl(%Array* %__controlQubits__, { { %Callable* }*, i64, double, %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { %Callable* }*, i64, double, %Array*, %Qubit* }, { { %Callable* }*, i64, double, %Array*, %Qubit* }* %0, i32 0, i32 0 + %oracle = load { %Callable* }*, { %Callable* }** %1, align 8 + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %oracle, i32 0, i32 0 + %3 = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 1) + %4 = bitcast { %Callable* }* %oracle to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Callable* }*, i64, double, %Array*, %Qubit* }, { { %Callable* }*, i64, double, %Array*, %Qubit* }* %0, i32 0, i32 1 + %power = load i64, i64* %5, align 4 + %6 = getelementptr inbounds { { %Callable* }*, i64, double, %Array*, %Qubit* }, { { %Callable* }*, i64, double, %Array*, %Qubit* }* %0, i32 0, i32 2 + %theta = load double, double* %6, align 8 + %7 = getelementptr inbounds { { %Callable* }*, i64, double, %Array*, %Qubit* }, { { %Callable* }*, i64, double, %Array*, %Qubit* }* %0, i32 0, i32 3 + %targetState = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %targetState, i32 1) + %8 = getelementptr inbounds { { %Callable* }*, i64, double, %Array*, %Qubit* }, { { %Callable* }*, i64, double, %Array*, %Qubit* }* %0, i32 0, i32 4 + %controlQubit = load %Qubit*, %Qubit** %8, align 8 + %9 = fneg double %theta + %10 = sitofp i64 %power to double + %inversionAngle = fmul double %9, %10 + call void @__quantum__qis__h__body(%Qubit* %controlQubit) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %12 = bitcast 
%Tuple* %11 to { double, %Qubit* }* + %13 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %12, i32 0, i32 1 + store double %inversionAngle, double* %13, align 8 + store %Qubit* %controlQubit, %Qubit** %14, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %12) + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %3, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %15) + call void @__quantum__rt__callable_make_controlled(%Callable* %15) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %16 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %16, i64 0) + %18 = bitcast i8* %17 to %Qubit** + store %Qubit* %controlQubit, %Qubit** %18, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %targetState, i32 1) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { i64, %Array* }* + %21 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %20, i32 0, i32 1 + store i64 %power, i64* %21, align 4 + store %Array* %targetState, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Array* }* }* getelementptr ({ %Array*, { i64, %Array* }* }, { %Array*, { i64, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { i64, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { i64, %Array* }* }, { %Array*, { i64, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { i64, %Array* }* }, { %Array*, { i64, %Array* }* }* %24, i32 0, i32 1 + store %Array* %16, %Array** %25, align 8 + store { i64, %Array* }* %20, { i64, %Array* }** %26, align 8 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, { i64, %Array* }* }* }* getelementptr ({ %Array*, { %Array*, { i64, %Array* }* }* }, { %Array*, { %Array*, { i64, %Array* }* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, { %Array*, { i64, %Array* }* }* }* + %29 = getelementptr inbounds { %Array*, { %Array*, { i64, %Array* }* }* }, { %Array*, { %Array*, { i64, %Array* }* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, { %Array*, { i64, %Array* }* }* }, { %Array*, { %Array*, { i64, %Array* }* }* }* %28, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %29, align 8 + store { %Array*, { i64, %Array* }* }* %24, { %Array*, { i64, %Array* }* }** %30, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %27, %Tuple* null) + call void @__quantum__qis__h__body(%Qubit* %controlQubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetState, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetState, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Characterization__DiscretePhaseEstimationIteration__ctladj(%Array* %__controlQubits__, { { %Callable* }*, i64, double, %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { %Callable* }*, i64, double, %Array*, %Qubit* }, { { %Callable* }*, i64, double, %Array*, %Qubit* }* %0, i32 0, i32 0 + %oracle = load { %Callable* }*, { %Callable* }** %1, align 8 + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %oracle, i32 0, i32 0 + %3 = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 1) + %4 = bitcast { %Callable* }* %oracle to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Callable* }*, i64, double, %Array*, %Qubit* }, { { %Callable* }*, i64, double, %Array*, %Qubit* }* %0, i32 0, i32 1 + %power = load i64, i64* %5, align 4 + %6 = getelementptr inbounds { { %Callable* }*, i64, double, %Array*, %Qubit* }, { { %Callable* }*, i64, double, %Array*, %Qubit* }* %0, i32 0, i32 2 + %theta = load double, double* %6, align 8 + %7 = getelementptr inbounds { { %Callable* }*, i64, double, %Array*, %Qubit* }, { { %Callable* }*, i64, double, %Array*, %Qubit* }* %0, i32 0, i32 3 + %targetState = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %targetState, i32 1) + %8 = getelementptr inbounds { { %Callable* }*, i64, double, %Array*, %Qubit* }, { { %Callable* }*, i64, double, %Array*, %Qubit* }* %0, i32 0, i32 4 + %controlQubit = load %Qubit*, %Qubit** %8, align 8 + %9 = fneg double %theta + %10 = sitofp i64 %power to double + %__qsVar0__inversionAngle__ = fmul double %9, %10 + call void @__quantum__qis__h__body(%Qubit* %controlQubit) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %3, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_adjoint(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 0) + %14 = bitcast i8* %13 to %Qubit** + store %Qubit* %controlQubit, %Qubit** %14, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %targetState, i32 1) + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* 
getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { i64, %Array* }* + %17 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %16, i32 0, i32 1 + store i64 %power, i64* %17, align 4 + store %Array* %targetState, %Array** %18, align 8 + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Array* }* }* getelementptr ({ %Array*, { i64, %Array* }* }, { %Array*, { i64, %Array* }* }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { %Array*, { i64, %Array* }* }* + %21 = getelementptr inbounds { %Array*, { i64, %Array* }* }, { %Array*, { i64, %Array* }* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { %Array*, { i64, %Array* }* }, { %Array*, { i64, %Array* }* }* %20, i32 0, i32 1 + store %Array* %12, %Array** %21, align 8 + store { i64, %Array* }* %16, { i64, %Array* }** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, { i64, %Array* }* }* }* getelementptr ({ %Array*, { %Array*, { i64, %Array* }* }* }, { %Array*, { %Array*, { i64, %Array* }* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { %Array*, { i64, %Array* }* }* }* + %25 = getelementptr inbounds { %Array*, { %Array*, { i64, %Array* }* }* }, { %Array*, { %Array*, { i64, %Array* }* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { %Array*, { i64, %Array* }* }* }, { %Array*, { %Array*, { i64, %Array* }* }* }* %24, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %25, align 8 + store { %Array*, { i64, %Array* }* }* %20, { %Array*, { i64, %Array* }* }** %26, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %23, %Tuple* null) + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { double, %Qubit* }* + %29 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %28, i32 0, i32 1 + store double %__qsVar0__inversionAngle__, double* %29, align 8 + store %Qubit* %controlQubit, %Qubit** %30, align 8 + call void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %28) + call void @__quantum__qis__h__body(%Qubit* %controlQubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetState, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetState, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) 
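+ ; %27 is the { double, %Qubit* } argument tuple built above for the Rz__ctladj call; releasing it below completes the temporary cleanup for this controlled-adjoint specialization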
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + ret void +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation____QsRef3___AddGeneratorSystems____body(i64 %idxTerm, i64 %nTermsA, i64 %nTermsB, %Callable* %generatorIndexFunctionA, %Callable* %generatorIndexFunctionB) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 1) + %0 = icmp slt i64 %idxTerm, %nTermsA + br i1 %0, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { i64 }* + %3 = getelementptr inbounds { i64 }, { i64 }* %2, i32 0, i32 0 + store i64 %idxTerm, i64* %3, align 4 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %generatorIndexFunctionA, %Tuple* %1, %Tuple* %4) + %5 = bitcast %Tuple* %4 to { { { %Array*, %Array* }*, %Array* }* }* + %6 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 0 + %7 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %6, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %7 + +else__1: ; preds = %entry + %8 = sub i64 %idxTerm, %nTermsA + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { i64 }* + %11 = getelementptr inbounds { i64 }, { i64 }* %10, i32 0, i32 0 + store i64 %8, i64* %11, align 4 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %generatorIndexFunctionB, %Tuple* %9, %Tuple* %12) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %15 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %14, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %15 + +continue__1: ; No predecessors! + unreachable +} + +define internal { double, { { %Callable* }* }* }* @Microsoft__Quantum__Simulation____QsRef3___PauliBlockEncoding____body({ i64, %Callable* }* %generatorSystem, %Callable* %statePrepUnitary, %Callable* %multiplexer) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %intToGenIdx = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %intToGenIdx, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %intToGenIdx, i32 1) + %1 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePrepUnitary, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePrepUnitary, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %multiplexer, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %multiplexer, i32 1) + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %2, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %intToGenIdx, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %intToGenIdx, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation____QsRef3__IdxToCoeff____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %intToGenIdx, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %intToGenIdx, i32 1) + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation__PauliCoefficientFromGenIdx__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Callable*, %Callable* }* + %7 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %6, i32 0, i32 2 + store %Callable* %3, %Callable** %7, align 8 + store %Callable* %intToGenIdx, %Callable** %8, align 8 + store %Callable* %4, %Callable** %9, align 8 + %op = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__53__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__29__FunctionTable, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + 
call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %10 = sub i64 %nTerms, 1 + %11 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %10, 2 + %12 = call %Array* @Microsoft__Quantum__Convert__RangeAsIntArray__body(%Range %11) + %coefficients = call %Array* @Microsoft__Quantum__Arrays___20d53ea8250b40bc86f0a0047df3f3c9_Mapped__body(%Callable* %op, %Array* %12) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %13 = call double @Microsoft__Quantum__Math__PNorm__body(double 2.000000e+00, %Array* %coefficients) + %oneNorm = call double @Microsoft__Quantum__Math__PowD__body(double %13, double 2.000000e+00) + %14 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation____QsRef3__IdxToUnitary____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %intToGenIdx, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %intToGenIdx, i32 1) + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation____QsRef3__PauliLCUUnitary____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Callable*, %Callable*, %Callable* }* + %18 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %17, i32 0, i32 2 + store %Callable* %14, %Callable** %18, align 8 + store %Callable* %intToGenIdx, %Callable** %19, align 8 + store %Callable* %15, %Callable** %20, align 8 + %21 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__54__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__30__FunctionTable, %Tuple* %16) + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %unitaryGenerator = bitcast %Tuple* %22 to { i64, %Callable* }* + %23 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %24 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + store i64 %nTerms, i64* %23, align 4 + store %Callable* %21, %Callable** %24, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %21, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 1) + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { %Array* }* + %27 = getelementptr inbounds { %Array* }, { %Array* }* %26, i32 0, i32 0 + store %Array* %coefficients, %Array** %27, align 8 + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) 
to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %statePrepUnitary, %Tuple* %25, %Tuple* %28) + %29 = bitcast %Tuple* %28 to { %Callable* }* + %30 = getelementptr inbounds { %Callable* }, { %Callable* }* %29, i32 0, i32 0 + %statePreparation = load %Callable*, %Callable** %30, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePreparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePreparation, i32 1) + %31 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %multiplexer, %Tuple* %22, %Tuple* %31) + %32 = bitcast %Tuple* %31 to { %Callable* }* + %33 = getelementptr inbounds { %Callable* }, { %Callable* }* %32, i32 0, i32 0 + %selector = load %Callable*, %Callable** %33, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %selector, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %selector, i32 1) + %34 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation____QsRef3__ApplyBlockEncodingFromBEandQubit____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %35 = call %Callable* @Microsoft__Quantum__Simulation___5250fdacb0b447b694e488df5bd25347_BlockEncodingByLCU__body(%Callable* %statePreparation, %Callable* %selector) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable* }, { %Callable*, %Callable* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { %Callable*, %Callable* }* + %38 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %37, i32 0, i32 1 + store %Callable* %34, %Callable** %38, align 8 + store %Callable* %35, %Callable** %39, align 8 + %40 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__55__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__31__FunctionTable, %Tuple* %36) + %41 = call { %Callable* }* @Microsoft__Quantum__Simulation__BlockEncoding__body(%Callable* %40) + %blockEncoding = call { { %Callable* }* }* @Microsoft__Quantum__Simulation__BlockEncodingReflection__body({ %Callable* }* %41) + %42 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %blockEncoding, i32 0, i32 0 + %43 = load { %Callable* }*, { %Callable* }** %42, align 8 + %44 = getelementptr inbounds { %Callable* }, { %Callable* }* %43, i32 0, i32 0 + %45 = load %Callable*, %Callable** %44, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %45, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %45, i32 1) + %46 = bitcast { %Callable* }* %43 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %46, i32 1) + %47 = bitcast { { %Callable* }* }* %blockEncoding to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %45, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %45, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %46, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 1) + %48 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }* }* }* getelementptr ({ double, { { %Callable* }* }* }, { double, { { %Callable* }* }* }* null, i32 1) to i64)) + %49 = bitcast %Tuple* %48 to { double, { { %Callable* }* }* }* + %50 = getelementptr inbounds { double, { { %Callable* }* }* }, { double, { { %Callable* }* }* }* %49, i32 0, i32 0 + %51 = getelementptr inbounds { double, { { %Callable* }* }* }, { double, { { %Callable* }* }* }* %49, i32 0, i32 1 + store double %oneNorm, double* %50, align 8 + store { { %Callable* }* }* %blockEncoding, { { %Callable* }* }** %51, align 8 + %52 = getelementptr inbounds { %Callable* }, { %Callable* }* %41, i32 0, i32 0 + %53 = load %Callable*, %Callable** %52, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %intToGenIdx, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %intToGenIdx, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePrepUnitary, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePrepUnitary, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %multiplexer, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %multiplexer, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %intToGenIdx, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %intToGenIdx, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %21, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePreparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePreparation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %selector, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %selector, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %45, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %45, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %46, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %statePreparation, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %statePreparation, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %selector, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %selector, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %40, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %40, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %53, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %53, i32 -1) + %54 = bitcast { %Callable* }* %41 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %54, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %45, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %45, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %46, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + ret { double, { { %Callable* }* }* }* %49 +} + +define internal void @Lifted__PartialApplication__53__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64 }* + %1 = getelementptr inbounds { i64 }, { i64 }* %0, i32 0, i32 0 + %2 = load i64, i64* %1, align 4 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %4 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %3, i32 0, i32 1 + %5 = load %Callable*, %Callable** %4, align 8 + %6 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %3, i32 0, i32 2 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Callable* }* getelementptr ({ i64, %Callable*, %Callable* }, { i64, %Callable*, %Callable* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { i64, %Callable*, %Callable* }* + %10 = getelementptr inbounds { i64, %Callable*, %Callable* }, { i64, %Callable*, %Callable* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { i64, %Callable*, %Callable* }, { i64, %Callable*, %Callable* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { i64, %Callable*, %Callable* }, { i64, %Callable*, %Callable* }* %9, i32 0, i32 2 + store i64 %2, i64* %10, align 4 + store %Callable* %5, %Callable** %11, align 8 + store %Callable* %7, %Callable** %12, align 8 + %13 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %3, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__IdxToCoeff____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable*, %Callable* }* + %1 = getelementptr inbounds { i64, %Callable*, %Callable* }, { i64, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Callable*, %Callable* }, { i64, %Callable*, %Callable* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, %Callable*, %Callable* }, { i64, %Callable*, %Callable* }* %0, i32 0, i32 2 
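+ ; load the index and the two lookup callables from %arg-tuple, forward them to IdxToCoeff, and store the resulting coefficient into %result-tuple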
+  %4 = load i64, i64* %1, align 4
+  %5 = load %Callable*, %Callable** %2, align 8
+  %6 = load %Callable*, %Callable** %3, align 8
+  %7 = call double @Microsoft__Quantum__Simulation____QsRef3__IdxToCoeff____body(i64 %4, %Callable* %5, %Callable* %6)
+  %8 = bitcast %Tuple* %result-tuple to { double }*
+  %9 = getelementptr inbounds { double }, { double }* %8, i32 0, i32 0
+  store double %7, double* %9, align 8
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Simulation__PauliCoefficientFromGenIdx__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { { %Array*, %Array* }*, %Array* }*
+  %1 = call double @Microsoft__Quantum__Simulation__PauliCoefficientFromGenIdx__body({ { %Array*, %Array* }*, %Array* }* %0)
+  %2 = bitcast %Tuple* %result-tuple to { double }*
+  %3 = getelementptr inbounds { double }, { double }* %2, i32 0, i32 0
+  store double %1, double* %3, align 8
+  ret void
+}
+
+define internal void @MemoryManagement__29__RefCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }*
+  %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0
+  %2 = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change)
+  %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1
+  %4 = load %Callable*, %Callable** %3, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change)
+  %5 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2
+  %6 = load %Callable*, %Callable** %5, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change)
+  ret void
+}
+
+define internal void @MemoryManagement__29__AliasCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }*
+  %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0
+  %2 = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change)
+  %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1
+  %4 = load %Callable*, %Callable** %3, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change)
+  %5 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2
+  %6 = load %Callable*, %Callable** %5, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__54__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { i64 }*
+  %1 = getelementptr inbounds { i64 }, { i64 }* %0, i32 0, i32 0
+  %2 = load i64, i64* %1, align 4
+  %3 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }*
+  %4 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %3, i32 0, i32 1
+  %5 = load %Callable*, %Callable** %4, align 8
+  %6 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %3, i32 0, i32 2
+  %7 = load %Callable*, %Callable** %6, align 8
+  %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Callable* }* getelementptr ({ i64, %Callable*, %Callable* }, { i64, %Callable*, %Callable* }* null, i32 1) to i64))
+  %9 = bitcast %Tuple* %8 to { i64, %Callable*, %Callable* }*
+  %10 = getelementptr inbounds { i64, %Callable*, %Callable* }, { i64, %Callable*, %Callable* }* %9, i32 0, i32 0
+  %11 = getelementptr inbounds { i64, %Callable*, %Callable* }, { i64, %Callable*, %Callable* }* %9, i32 0, i32 1
+  %12 = getelementptr inbounds { i64, %Callable*, %Callable* }, { i64, %Callable*, %Callable* }* %9, i32 0, i32 2
+  store i64 %2, i64* %10, align 4
+  store %Callable* %5, %Callable** %11, align 8
+  store %Callable* %7, %Callable** %12, align 8
+  %13 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %3, i32 0, i32 0
+  %14 = load %Callable*, %Callable** %13, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Simulation____QsRef3__IdxToUnitary____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable*, %Callable* }*
+  %1 = getelementptr inbounds { i64, %Callable*, %Callable* }, { i64, %Callable*, %Callable* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { i64, %Callable*, %Callable* }, { i64, %Callable*, %Callable* }* %0, i32 0, i32 1
+  %3 = getelementptr inbounds { i64, %Callable*, %Callable* }, { i64, %Callable*, %Callable* }* %0, i32 0, i32 2
+  %4 = load i64, i64* %1, align 4
+  %5 = load %Callable*, %Callable** %2, align 8
+  %6 = load %Callable*, %Callable** %3, align 8
+  %7 = call %Callable* @Microsoft__Quantum__Simulation____QsRef3__IdxToUnitary____body(i64 %4, %Callable* %5, %Callable* %6)
+  %8 = bitcast %Tuple* %result-tuple to { %Callable* }*
+  %9 = getelementptr inbounds { %Callable* }, { %Callable* }* %8, i32 0, i32 0
+  store %Callable* %7, %Callable** %9, align 8
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Simulation____QsRef3__PauliLCUUnitary____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { { %Array*, %Array* }*, %Array* }*
+  %1 = call %Callable* @Microsoft__Quantum__Simulation____QsRef3__PauliLCUUnitary____body({ { %Array*, %Array* }*, %Array* }* %0)
+  %2 = bitcast %Tuple* %result-tuple to { %Callable* }*
+  %3 = getelementptr inbounds { %Callable* }, { %Callable* }* %2, i32 0, i32 0
+  store %Callable* %1, %Callable** %3, align 8
+  ret void
+}
+
+define internal void @MemoryManagement__30__RefCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }*
+  %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0
+  %2 = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change)
+  %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1
+  %4 = load %Callable*, %Callable** %3, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change)
+  %5 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2
+  %6 = load %Callable*, %Callable** %5, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change)
+  ret void
+}
+
+define internal void @MemoryManagement__30__AliasCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }*
+  %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0
+  %2 = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change)
+  %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1
+  %4 = load %Callable*, %Callable** %3, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change)
+  %5 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2
+  %6 = load %Callable*, %Callable** %5, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change)
+  ret void
+}
+
+define internal { { %Callable* }* }* @Microsoft__Quantum__Simulation__BlockEncodingReflection__body({ %Callable* }* %__Item1__) {
+entry:
+  %0 = getelementptr inbounds { %Callable* }, { %Callable* }* %__Item1__, i32 0, i32 0
+  %1 = load %Callable*, %Callable** %0, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1)
+  %2 = bitcast { %Callable* }* %__Item1__ to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1)
+  %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }* }* getelementptr ({ { %Callable* }* }, { { %Callable* }* }* null, i32 1) to i64))
+  %4 = bitcast %Tuple* %3 to { { %Callable* }* }*
+  %5 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %4, i32 0, i32 0
+  store { %Callable* }* %__Item1__, { %Callable* }** %5, align 8
+  %6 = getelementptr inbounds { %Callable* }, { %Callable* }* %__Item1__, i32 0, i32 0
+  %7 = load %Callable*, %Callable** %6, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 1)
+  %8 = bitcast { %Callable* }* %__Item1__ to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1)
+  ret { { %Callable* }* }* %4
+}
+
+define internal { %Callable* }* @Microsoft__Quantum__Simulation__BlockEncoding__body(%Callable* %__Item1__) {
+entry:
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 1)
+  %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64))
+  %1 = bitcast %Tuple* %0 to { %Callable* }*
+  %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0
+  store %Callable* %__Item1__, %Callable** %2, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item1__, i32 1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item1__, i32 1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 -1)
+  ret { %Callable* }* %1
+}
+
+define internal void @Lifted__PartialApplication__55__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }*
+  %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1
+  %2 = load %Callable*, %Callable** %1, align 8
+  %3 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }*
+  %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0
+  %5 = load %Array*, %Array** %4, align 8
+  %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1
+  %7 = load %Array*, %Array** %6, align 8
+  %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Array* }* getelementptr ({ %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* null, i32 1) to i64))
+  %9 = bitcast %Tuple* %8 to { %Callable*, %Array*, %Array* }*
+  %10 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %9, i32 0, i32 0
+  %11 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %9, i32 0, i32 1
+  %12 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %9, i32 0, i32 2
+  store %Callable* %2, %Callable** %10, align 8
+  store %Array* %5, %Array** %11, align 8
+  store %Array* %7, %Array** %12, align 8
+  %13 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0
+  %14 = load %Callable*, %Callable** %13, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__55__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }*
+  %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1
+  %2 = load %Callable*, %Callable** %1, align 8
+  %3 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }*
+  %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0
+  %5 = load %Array*, %Array** %4, align 8
+  %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1
+  %7 = load %Array*, %Array** %6, align 8
+  %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Array* }* getelementptr ({ %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* null, i32 1) to i64))
+  %9 = bitcast %Tuple* %8 to { %Callable*, %Array*, %Array* }*
+  %10 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %9, i32 0, i32 0
+  %11 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %9, i32 0, i32 1
+  %12 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %9, i32 0, i32 2
+  store %Callable* %2, %Callable** %10, align 8
+  store %Array* %5, %Array** %11, align 8
+  store %Array* %7, %Array** %12, align 8
+  %13 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0
+  %14 = load %Callable*, %Callable** %13, align 8
+  %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %15)
+  call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__55__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }*
+  %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1
+  %3 = load %Array*, %Array** %1, align 8
+  %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8
+  %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }*
+  %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1
+  %7 = load %Callable*, %Callable** %6, align 8
+  %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0
+  %9 = load %Array*, %Array** %8, align 8
+  %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1
+  %11 = load %Array*, %Array** %10, align 8
+  %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Array* }* getelementptr ({ %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* null, i32 1) to i64))
+  %13 = bitcast %Tuple* %12 to { %Callable*, %Array*, %Array* }*
+  %14 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %13, i32 0, i32 0
+  %15 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %13, i32 0, i32 1
+  %16 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %13, i32 0, i32 2
+  store %Callable* %7, %Callable** %14, align 8
+  store %Array* %9, %Array** %15, align 8
+  store %Array* %11, %Array** %16, align 8
+  %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, %Array*, %Array* }* }* getelementptr ({ %Array*, { %Callable*, %Array*, %Array* }* }, { %Array*, { %Callable*, %Array*, %Array* }* }* null, i32 1) to i64))
+  %18 = bitcast %Tuple* %17 to { %Array*, { %Callable*, %Array*, %Array* }* }*
+  %19 = getelementptr inbounds { %Array*, { %Callable*, %Array*, %Array* }* }, { %Array*, { %Callable*, %Array*, %Array* }* }* %18, i32 0, i32 0
+  %20 = getelementptr inbounds { %Array*, { %Callable*, %Array*, %Array* }* }, { %Array*, { %Callable*, %Array*, %Array* }* }* %18, i32 0, i32 1
+  store %Array* %3, %Array** %19, align 8
+  store { %Callable*, %Array*, %Array* }* %13, { %Callable*, %Array*, %Array* }** %20, align 8
+  %21 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0
+  %22 = load %Callable*, %Callable** %21, align 8
+  %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %23)
+  call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__55__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }*
+  %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1
+  %3 = load %Array*, %Array** %1, align 8
+  %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8
+  %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }*
+  %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1
+  %7 = load %Callable*, %Callable** %6, align 8
+  %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0
+  %9 = load %Array*, %Array** %8, align 8
+  %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1
+  %11 = load %Array*, %Array** %10, align 8
+  %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Array* }* getelementptr ({ %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* null, i32 1) to i64))
+  %13 = bitcast %Tuple* %12 to { %Callable*, %Array*, %Array* }*
+  %14 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %13, i32 0, i32 0
+  %15 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %13, i32 0, i32 1
+  %16 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %13, i32 0, i32 2
+  store %Callable* %7, %Callable** %14, align 8
+  store %Array* %9, %Array** %15, align 8
+  store %Array* %11, %Array** %16, align 8
+  %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, %Array*, %Array* }* }* getelementptr ({ %Array*, { %Callable*, %Array*, %Array* }* }, { %Array*, { %Callable*, %Array*, %Array* }* }* null, i32 1) to i64))
+  %18 = bitcast %Tuple* %17 to { %Array*, { %Callable*, %Array*, %Array* }* }*
+  %19 = getelementptr inbounds { %Array*, { %Callable*, %Array*, %Array* }* }, { %Array*, { %Callable*, %Array*, %Array* }* }* %18, i32 0, i32 0
+  %20 = getelementptr inbounds { %Array*, { %Callable*, %Array*, %Array* }* }, { %Array*, { %Callable*, %Array*, %Array* }* }* %18, i32 0, i32 1
+  store %Array* %3, %Array** %19, align 8
+  store { %Callable*, %Array*, %Array* }* %13, { %Callable*, %Array*, %Array* }** %20, align 8
+  %21 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0
+  %22 = load %Callable*, %Callable** %21, align 8
+  %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %23)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %23)
+  call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyBlockEncodingFromBEandQubit____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { %Callable*, %Array*, %Array* }*
+  %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1
+  %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2
+  %4 = load %Callable*, %Callable** %1, align 8
+  %5 = load %Array*, %Array** %2, align 8
+  %6 = load %Array*, %Array** %3, align 8
+  call void @Microsoft__Quantum__Simulation____QsRef3__ApplyBlockEncodingFromBEandQubit____body(%Callable* %4, %Array* %5, %Array* %6)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyBlockEncodingFromBEandQubit____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { %Callable*, %Array*, %Array* }*
+  %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1
+  %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2
+  %4 = load %Callable*, %Callable** %1, align 8
+  %5 = load %Array*, %Array** %2, align 8
+  %6 = load %Array*, %Array** %3, align 8
+  call void @Microsoft__Quantum__Simulation____QsRef3__ApplyBlockEncodingFromBEandQubit____adj(%Callable* %4, %Array* %5, %Array* %6)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyBlockEncodingFromBEandQubit____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, %Array*, %Array* }* }*
+  %1 = getelementptr inbounds { %Array*, { %Callable*, %Array*, %Array* }* }, { %Array*, { %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { %Array*, { %Callable*, %Array*, %Array* }* }, { %Array*, { %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 1
+  %3 = load %Array*, %Array** %1, align 8
+  %4 = load { %Callable*, %Array*, %Array* }*, { %Callable*, %Array*, %Array* }** %2, align 8
+  call void @Microsoft__Quantum__Simulation____QsRef3__ApplyBlockEncodingFromBEandQubit____ctl(%Array* %3, { %Callable*, %Array*, %Array* }* %4)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyBlockEncodingFromBEandQubit____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, %Array*, %Array* }* }*
+  %1 = getelementptr inbounds { %Array*, { %Callable*, %Array*, %Array* }* }, { %Array*, { %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { %Array*, { %Callable*, %Array*, %Array* }* }, { %Array*, { %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 1
+  %3 = load %Array*, %Array** %1, align 8
+  %4 = load { %Callable*, %Array*, %Array* }*, { %Callable*, %Array*, %Array* }** %2, align 8
+  call void @Microsoft__Quantum__Simulation____QsRef3__ApplyBlockEncodingFromBEandQubit____ctladj(%Array* %3, { %Callable*, %Array*, %Array* }* %4)
+  ret void
+}
+
+define internal %Callable* @Microsoft__Quantum__Simulation___5250fdacb0b447b694e488df5bd25347_BlockEncodingByLCU__body(%Callable* %statePreparation, %Callable* %selector) {
+entry:
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %statePreparation, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %statePreparation, i32 1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %selector, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %selector, i32 1)
+  %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation___cf163a106d11461497cf5b799872fa06___QsRef3__ApplyBlockEncodingByLCU____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %statePreparation, i32 1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %statePreparation, i32 1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %selector, i32 1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %selector, i32 1)
+  %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* null, i32 1) to i64))
+  %2 = bitcast %Tuple* %1 to { %Callable*, %Callable*, %Callable* }*
+  %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 0
+  %4 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 1
+  %5 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 2
+  store %Callable* %0, %Callable** %3, align 8
+  store %Callable* %statePreparation, %Callable** %4, align 8
+  store %Callable* %selector, %Callable** %5, align 8
+  %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__66__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__38__FunctionTable, %Tuple* %1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %statePreparation, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %statePreparation, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %selector, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %selector, i32 -1)
+  ret %Callable* %6
+}
+
+define internal void @MemoryManagement__31__RefCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }*
+  %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0
+  %2 = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change)
+  %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1
+  %4 = load %Callable*, %Callable** %3, align 8
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change)
+  ret void
+}
+
+define internal void @MemoryManagement__31__AliasCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }*
+  %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0
+  %2 = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change)
+  %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1
+  %4 = load %Callable*, %Callable** %3, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change)
+  ret void
+}
+
+define internal double @Microsoft__Quantum__Simulation____QsRef3__IdxToCoeff____body(i64 %idx, %Callable* %genFun, %Callable* %genIdxToCoeff) {
+entry:
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %genFun, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %genFun, i32 1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %genIdxToCoeff, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %genIdxToCoeff, i32 1)
+  %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64))
+  %1 = bitcast %Tuple* %0 to { i64 }*
+  %2 = getelementptr inbounds { i64 }, { i64 }* %1, i32 0, i32 0
+  store i64 %idx, i64* %2, align 4
+  %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64))
+  call void @__quantum__rt__callable_invoke(%Callable* %genFun, %Tuple* %0, %Tuple* %3)
+  %4 = bitcast %Tuple* %3 to { { { %Array*, %Array* }*, %Array* }* }*
+  %5 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %4, i32 0, i32 0
+  %6 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %5, align 8
+  %7 = bitcast { { %Array*, %Array* }*, %Array* }* %6 to %Tuple*
+  %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64))
+  call void @__quantum__rt__callable_invoke(%Callable* %genIdxToCoeff, %Tuple* %7, %Tuple* %8)
+  %9 = bitcast %Tuple* %8 to { double }*
+  %10 = getelementptr inbounds { double }, { double }* %9, i32 0, i32 0
+  %11 = load double, double* %10, align 8
+  %d = call double @Microsoft__Quantum__Math__AbsD__body(double %11)
+  %12 = call double @__quantum__qis__sqrt__body(double %d)
+  %13 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %6, i32 0, i32 0
+  %14 = load { %Array*, %Array* }*, { %Array*, %Array* }** %13, align 8
+  %15 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %14, i32 0, i32 0
+  %16 = load %Array*, %Array** %15, align 8
+  %17 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %14, i32 0, i32 1
+  %18 = load %Array*, %Array** %17, align 8
+  %19 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %6, i32 0, i32 1
+  %20 = load %Array*, %Array** %19, align 8
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 -1)
+  %21 = bitcast { %Array*, %Array* }* %14 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %genFun, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %genFun, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %genIdxToCoeff, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %genIdxToCoeff, i32 -1)
+  ret double %12
+}
+
+define internal double @Microsoft__Quantum__Simulation__PauliCoefficientFromGenIdx__body({ { %Array*, %Array* }*, %Array* }* %generatorIndex) {
+entry:
+  %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0
+  %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8
+  %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0
+  %idxPaulis = load %Array*, %Array** %2, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxPaulis, i32 1)
+  %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1
+  %coeff = load %Array*, %Array** %3, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1)
+  %4 = bitcast { %Array*, %Array* }* %1 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1)
+  %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1
+  %idxQubits = load %Array*, %Array** %5, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxQubits, i32 1)
+  %6 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxPaulis, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxQubits, i32 1)
+  %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0)
+  %8 = bitcast i8* %7 to double*
+  %9 = load double, double* %8, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxPaulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxQubits, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxPaulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxQubits, i32 -1)
+  ret double %9
+}
+
+define internal %Callable* @Microsoft__Quantum__Simulation____QsRef3__IdxToUnitary____body(i64 %idx, %Callable* %genFun, %Callable* %genIdxToUnitary) {
+entry:
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %genFun, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %genFun, i32 1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %genIdxToUnitary, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %genIdxToUnitary, i32 1)
+  %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64))
+  %1 = bitcast %Tuple* %0 to { i64 }*
+  %2 = getelementptr inbounds { i64 }, { i64 }* %1, i32 0, i32 0
+  store i64 %idx, i64* %2, align 4
+  %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64))
+  call void @__quantum__rt__callable_invoke(%Callable* %genFun, %Tuple* %0, %Tuple* %3)
+  %4 = bitcast %Tuple* %3 to { { { %Array*, %Array* }*, %Array* }* }*
+  %5 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %4, i32 0, i32 0
+  %6 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %5, align 8
+  %7 = bitcast { { %Array*, %Array* }*, %Array* }* %6 to %Tuple*
+  %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64))
+  call void @__quantum__rt__callable_invoke(%Callable* %genIdxToUnitary, %Tuple* %7, %Tuple* %8)
+  %9 = bitcast %Tuple* %8 to { %Callable* }*
+  %10 = getelementptr inbounds { %Callable* }, { %Callable* }* %9, i32 0, i32 0
+  %11 = load %Callable*, %Callable** %10, align 8
+  %12 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %6, i32 0, i32 0
+  %13 = load { %Array*, %Array* }*, { %Array*, %Array* }** %12, align 8
+  %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 0
+  %15 = load %Array*, %Array** %14, align 8
+  %16 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 1
+  %17 = load %Array*, %Array** %16, align 8
+  %18 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %6, i32 0, i32 1
+  %19 = load %Array*, %Array** %18, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %genFun, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %genFun, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %genIdxToUnitary, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %genIdxToUnitary, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1)
+  %20 = bitcast { %Array*, %Array* }* %13 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1)
+  ret %Callable* %11
+}
+
+define internal %Callable* @Microsoft__Quantum__Simulation____QsRef3__PauliLCUUnitary____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex) {
+entry:
+  %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0
+  %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8
+  %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0
+  %3 = load %Array*, %Array** %2, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1)
+  %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1
+  %5 = load %Array*, %Array** %4, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1)
+  %6 = bitcast { %Array*, %Array* }* %1 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1)
+  %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1
+  %8 = load %Array*, %Array** %7, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1)
+  %9 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1)
+  %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation____QsRef3__ApplyPauliLCUUnitary____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 1)
+  %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64))
+  %12 = bitcast %Tuple* %11 to { %Callable*, { { %Array*, %Array* }*, %Array* }* }*
+  %13 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %12, i32 0, i32 0
+  %14 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %12, i32 0, i32 1
+  store %Callable* %10, %Callable** %13, align 8
+  store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %14, align 8
+  %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__56__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__32__FunctionTable, %Tuple* %11)
+  call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1)
+  ret %Callable* %15
+}
+
+define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyBlockEncodingFromBEandQubit____body(%Callable* %op, %Array* %auxiliary, %Array* %system) {
+entry:
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 1)
+  %0 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %auxiliary)
+  call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 1)
+  %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64))
+  %2 = bitcast %Tuple* %1 to { { %Array* }*, %Array* }*
+  %3 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %2, i32 0, i32 0
+  %4 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %2, i32 0, i32 1
+  store { %Array* }* %0, { %Array* }** %3, align 8
+  store %Array* %system, %Array** %4, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %1, %Tuple* null)
+  %5 = getelementptr inbounds { %Array* }, { %Array* }* %0, i32 0, i32 0
+  %6 = load %Array*, %Array** %5, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1)
+  %7 = bitcast { %Array* }* %0 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %1, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyBlockEncodingFromBEandQubit____adj(%Callable* %op, %Array* %auxiliary, %Array* %system) {
+entry:
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 1)
+  %0 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %0)
+  %1 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %auxiliary)
+  call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 1)
+  %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64))
+  %3 = bitcast %Tuple* %2 to { { %Array* }*, %Array* }*
+  %4 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 0
+  %5 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 1
+  store { %Array* }* %1, { %Array* }** %4, align 8
+  store %Array* %system, %Array** %5, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %0, %Tuple* %2, %Tuple* null)
+  %6 = getelementptr inbounds { %Array* }, { %Array* }* %1, i32 0, i32 0
+  %7 = load %Array*, %Array** %6, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %7, i32 -1)
+  %8 = bitcast { %Array* }* %1 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyBlockEncodingFromBEandQubit____ctl(%Array* %__controlQubits__, { %Callable*, %Array*, %Array* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0
+  %op = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1)
+  %2 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1
+  %auxiliary = load %Array*, %Array** %2, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 1)
+  %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2
+  %system = load %Array*, %Array** %3, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 1)
+  %4 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 1)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %4)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1)
+  %5 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %auxiliary)
+  call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 1)
+  %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64))
+  %7 = bitcast %Tuple* %6 to { { %Array* }*, %Array* }*
+  %8 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %7, i32 0, i32 0
+  %9 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %7, i32 0, i32 1
+  store { %Array* }* %5, { %Array* }** %8, align 8
+  store %Array* %system, %Array** %9, align 8
+  %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* null, i32 1) to i64))
+  %11 = bitcast %Tuple* %10 to { %Array*, { { %Array* }*, %Array* }* }*
+  %12 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %11, i32 0, i32 0
+  %13 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %11, i32 0, i32 1
+  store %Array* %__controlQubits__, %Array** %12, align 8
+  store { { %Array* }*, %Array* }* %7, { { %Array* }*, %Array* }** %13, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %10, %Tuple* null)
+  %14 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0
+  %15 = load %Array*, %Array** %14, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1)
+  %16 = bitcast { %Array* }* %5 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyBlockEncodingFromBEandQubit____ctladj(%Array* %__controlQubits__, { %Callable*, %Array*, %Array* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0
+  %op = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1)
+  %2 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1
+  %auxiliary = load %Array*, %Array** %2, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 1)
+  %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2
+  %system = load %Array*, %Array** %3, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 1)
+  %4 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %4)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %4)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1)
+  %5 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %auxiliary)
+  call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 1)
+  %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64))
+  %7 = bitcast %Tuple* %6 to { { %Array* }*, %Array* }*
+  %8 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %7, i32 0, i32 0
+  %9 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %7, i32 0, i32 1
+  store { %Array* }* %5, { %Array* }** %8, align 8
+  store %Array* %system, %Array** %9, align 8
+  %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* null, i32 1) to i64))
+  %11 = bitcast %Tuple* %10 to { %Array*, { { %Array* }*, %Array* }* }*
+  %12 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %11, i32 0, i32 0
+  %13 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %11, i32 0, i32 1
+  store %Array* %__controlQubits__, %Array** %12, align 8
+  store { { %Array* }*, %Array* }* %7, { { %Array* }*, %Array* }** %13, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %10, %Tuple* null)
+  %14 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0
+  %15 = load %Array*, %Array** %14, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1)
+  %16 = bitcast { %Array* }* %5 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyPauliLCUUnitary____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex, %Array* %qubits) {
+entry:
+  %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0
+  %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8
+  %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0
+  %idxPaulis = load %Array*, %Array** %2, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxPaulis, i32 1)
+  %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1
+  %coeff = load %Array*, %Array** %3, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1)
+  %4 = bitcast { %Array*, %Array* }* %1 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1)
+  %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1
+  %idxQubits = load %Array*, %Array** %5, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxQubits, i32 1)
+  %6 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxPaulis, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxQubits, i32 1)
+  %pauliString = call %Array* @Microsoft__Quantum__Simulation__IntsToPaulis__body(%Array* %idxPaulis)
+  call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1)
+  %pauliQubits = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %idxQubits, %Array* %qubits)
+  call void @__quantum__rt__array_update_alias_count(%Array* %pauliQubits, i32 1)
+  call void @Microsoft__Quantum__Canon__ApplyPauli__body(%Array* %pauliString, %Array* %pauliQubits)
+  %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0)
+  %8 = bitcast i8* %7 to double*
+  %9 = load double, double* %8, align 8
+  %10 = fcmp olt double %9, 0.000000e+00
+  br i1 %10, label %then0__1, label %continue__1
+
+then0__1: ; preds = %entry
+  %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1)
+  %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0)
+  %12 = bitcast i8* %11 to i2*
+  store i2 0, i2* %12, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1)
+  %theta = call double @Microsoft__Quantum__Math__PI__body()
+  %13 = call %Qubit* @Microsoft__Quantum__Arrays___af5d1f5b3fc545fd94571101b9dee3d5_Head__body(%Array* %pauliQubits)
+  %qubits__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits__1, i64 0)
+  %15 = bitcast i8* %14 to %Qubit**
+  store %Qubit* %13, %Qubit** %15, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1)
+  call void @__quantum__qis__exp__body(%Array* %paulis, double %theta, %Array* %qubits__1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1)
+  br label %continue__1
+
+continue__1: ; preds = %then0__1, %entry
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxPaulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxQubits, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxPaulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxQubits, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %pauliQubits, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %pauliQubits, i32 -1)
+  ret void
+}
+
+define internal %Array* @Microsoft__Quantum__Simulation__IntsToPaulis__body(%Array* %ints) {
+entry:
+  %paulis = alloca %Array*, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %ints, i32 1)
+  %nInts = call i64 @__quantum__rt__array_get_size_1d(%Array* %ints)
+  %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %nInts)
+  %1 = sub i64 %nInts, 1
+  br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+  %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ]
+  %3 = icmp sle i64 %2, %1
+  br i1 %3, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+  %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2)
+  %5 = bitcast i8* %4 to i2*
+  store i2 0, i2* %5, align 1
+  br label %exiting__1
+
+exiting__1: ; preds = %body__1
+  %6 = add i64 %2, 1
+  br label %header__1
+
+exit__1: ; preds = %header__1
+  store %Array* %0, %Array** %paulis, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 1)
+  %7 = sub i64 %nInts, 1
+  br label %header__2
+
+header__2: ; preds = %exiting__2, %exit__1
+  %idxInt = phi i64 [ 0, %exit__1 ], [ %17, %exiting__2 ]
+  %8 = icmp sle i64 %idxInt, %7
+  br i1 %8, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+  %9 = load %Array*, %Array** %paulis, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 -1)
+  %10 = call %Array* @__quantum__rt__array_copy(%Array* %9, i1 false)
+  %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ints, i64 %idxInt)
+  %12 = bitcast i8* %11 to i64*
+  %13 = load i64, i64* %12, align 4
+  %14 = call i2 @Microsoft__Quantum__Simulation__IntToPauli__body(i64 %13)
+  %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %idxInt)
+  %16 = bitcast i8* %15 to i2*
+  store i2 %14, i2* %16, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1)
+  store %Array* %10, %Array** %paulis, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1)
+  br label %exiting__2
+
+exiting__2: ; preds = %body__2
+  %17 = add i64 %idxInt, 1
+  br label %header__2
+
+exit__2: ; preds = %header__2
+  %18 = load %Array*, %Array** %paulis, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %ints, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 -1)
+  ret %Array* %18
+}
+
+define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyPauliLCUUnitary____adj({ { %Array*, %Array* }*, %Array* }* %generatorIndex, %Array* %qubits) {
+entry:
+  %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0
+  %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8
+  %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0
+  %__qsVar0__idxPaulis__ = load %Array*, %Array** %2, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxPaulis__, i32 1)
+  %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1
+  %__qsVar1__coeff__ = load %Array*, %Array** %3, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1)
+  %4 = bitcast { %Array*, %Array* }* %1 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1)
+  %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1
+  %__qsVar2__idxQubits__ = load %Array*, %Array** %5, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxQubits__, i32 1)
+  %6 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxPaulis__, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxQubits__, i32 1)
+  %__qsVar3__pauliString__ = call %Array* @Microsoft__Quantum__Simulation__IntsToPaulis__body(%Array* %__qsVar0__idxPaulis__)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar3__pauliString__, i32 1)
+  %__qsVar4__pauliQubits__ = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %__qsVar2__idxQubits__, %Array* %qubits)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__pauliQubits__, i32 1)
+  %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0)
+  %8 = bitcast i8* %7 to double*
+  %9 = load double, double* %8, align 8
+  %10 = fcmp olt double %9, 0.000000e+00
+  br i1 %10, label %then0__1, label %continue__1
+
+then0__1: ; preds = %entry
+  %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1)
+  %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0)
+  %12 = bitcast i8* %11 to i2*
+  store i2 0, i2* %12, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1)
+  %theta = call double @Microsoft__Quantum__Math__PI__body()
+  %13 = call %Qubit* @Microsoft__Quantum__Arrays___af5d1f5b3fc545fd94571101b9dee3d5_Head__body(%Array* %__qsVar4__pauliQubits__)
+  %qubits__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits__1, i64 0)
+  %15 = bitcast i8* %14 to %Qubit**
+  store %Qubit* %13, %Qubit** %15, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1)
+  call void @__quantum__qis__exp__adj(%Array* %paulis, double %theta, %Array* %qubits__1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1)
+  br label %continue__1
+
+continue__1: ; preds = %then0__1, %entry
+  call void @Microsoft__Quantum__Canon__ApplyPauli__adj(%Array* %__qsVar3__pauliString__, %Array* %__qsVar4__pauliQubits__)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxPaulis__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxQubits__, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxPaulis__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar3__pauliString__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__pauliQubits__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar3__pauliString__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar4__pauliQubits__, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyPauliLCUUnitary____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %0, i32 0, i32 0
+  %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8
+  %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0
+  %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8
+  %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0
+  %idxPaulis = load %Array*, %Array** %4, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxPaulis, i32 1)
+  %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1
+  %coeff = load %Array*, %Array** %5, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1)
+  %6 = bitcast { %Array*, %Array* }* %3 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1)
+  %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1
+  %idxQubits = load %Array*, %Array** %7, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxQubits, i32 1)
+  %8 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1)
+  %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %0, i32 0, i32 1
+  %qubits = load %Array*, %Array** %9, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxPaulis, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %idxQubits, i32 1)
+  %pauliString = call %Array* @Microsoft__Quantum__Simulation__IntsToPaulis__body(%Array* %idxPaulis)
+  call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1)
+  %pauliQubits = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %idxQubits, %Array* %qubits)
+  call void @__quantum__rt__array_update_alias_count(%Array* %pauliQubits, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %pauliQubits, i32 1)
+  %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64))
+  %11 = bitcast %Tuple* %10 to { %Array*, %Array* }*
+  %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 0
+  %13 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 1
+  store %Array* %pauliString, %Array** %12, align 8
+  store %Array* %pauliQubits, %Array** %13, align 8
+  call void @Microsoft__Quantum__Canon__ApplyPauli__ctl(%Array* %__controlQubits__, { %Array*, %Array* }* %11)
+  %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0)
+  %15 = bitcast i8* %14 to double*
+  %16 = load double, double* %15, align 8
+  %17 = fcmp olt double %16, 0.000000e+00
+  br i1 %17, label %then0__1, label %continue__1
+
+then0__1: ; preds = %entry
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1)
+  %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0)
+  %19 = bitcast i8* %18 to i2*
+  store i2 0, i2* %19, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1)
+  %theta = call double @Microsoft__Quantum__Math__PI__body()
+  %20 = call %Qubit* @Microsoft__Quantum__Arrays___af5d1f5b3fc545fd94571101b9dee3d5_Head__body(%Array* %pauliQubits)
+  %qubits__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits__1, i64 0)
+  %22 = bitcast i8* %21 to %Qubit**
+  store %Qubit* %20, %Qubit** %22, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1)
+  %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64))
+  %24 = bitcast %Tuple* %23 to { %Array*, double, %Array* }*
+  %25 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %24, i32 0, i32 0
+  %26 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %24, i32 0, i32 1
+  %27 = getelementptr inbounds { %Array*, double,
%Array* }, { %Array*, double, %Array* }* %24, i32 0, i32 2 + store %Array* %paulis, %Array** %25, align 8 + store double %theta, double* %26, align 8 + store %Array* %qubits__1, %Array** %27, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %24) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxPaulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxQubits, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxPaulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxQubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliQubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliQubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliQubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyPauliLCUUnitary____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %0, i32 0, i32 0 + %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxPaulis__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxPaulis__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %5, 
align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %__qsVar2__idxQubits__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxQubits__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxPaulis__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxQubits__, i32 1) + %__qsVar3__pauliString__ = call %Array* @Microsoft__Quantum__Simulation__IntsToPaulis__body(%Array* %__qsVar0__idxPaulis__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar3__pauliString__, i32 1) + %__qsVar4__pauliQubits__ = call %Array* @Microsoft__Quantum__Arrays___781377a2a9174592812ecb8986ab6ed4_Subarray__body(%Array* %__qsVar2__idxQubits__, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__pauliQubits__, i32 1) + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %11 = bitcast i8* %10 to double* + %12 = load double, double* %11, align 8 + %13 = fcmp olt double %12, 0.000000e+00 + br i1 %13, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %15 = bitcast i8* %14 to i2* + store i2 0, i2* %15, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %theta = call double @Microsoft__Quantum__Math__PI__body() + %16 = call %Qubit* @Microsoft__Quantum__Arrays___af5d1f5b3fc545fd94571101b9dee3d5_Head__body(%Array* %__qsVar4__pauliQubits__) + %qubits__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits__1, i64 0) + %18 = bitcast i8* %17 to %Qubit** + store %Qubit* %16, %Qubit** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { %Array*, double, %Array* }* + %21 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %20, i32 0, i32 1 + %23 = getelementptr inbounds { %Array*, double, %Array* 
}, { %Array*, double, %Array* }* %20, i32 0, i32 2 + store %Array* %paulis, %Array** %21, align 8 + store double %theta, double* %22, align 8 + store %Array* %qubits__1, %Array** %23, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %20) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar3__pauliString__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar4__pauliQubits__, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { %Array*, %Array* }* + %26 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %25, i32 0, i32 1 + store %Array* %__qsVar3__pauliString__, %Array** %26, align 8 + store %Array* %__qsVar4__pauliQubits__, %Array** %27, align 8 + call void @Microsoft__Quantum__Canon__ApplyPauli__ctladj(%Array* %__controlQubits__, { %Array*, %Array* }* %25) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxPaulis__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxQubits__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxPaulis__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar3__pauliString__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar4__pauliQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar3__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar4__pauliQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar3__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar4__pauliQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyQuantumWalkByQubitization____body({ { %Callable* }* }* %blockEncoding, 
%Array* %auxiliary, %Array* %system) { +entry: + %0 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %blockEncoding, i32 0, i32 0 + %1 = load { %Callable* }*, { %Callable* }** %0, align 8 + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + %3 = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 1) + %4 = bitcast { %Callable* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = bitcast { { %Callable* }* }* %blockEncoding to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %7 = bitcast i8* %6 to i2* + store i2 0, i2* %7, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %8 = call double @Microsoft__Quantum__Math__PI__body() + %theta = fmul double -5.000000e-01, %8 + %9 = call %Qubit* @Microsoft__Quantum__Arrays___af5d1f5b3fc545fd94571101b9dee3d5_Head__body(%Array* %system) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %11 = bitcast i8* %10 to %Qubit** + store %Qubit* %9, %Qubit** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + %12 = call double @Microsoft__Quantum__Math__PI__body() + call void @Microsoft__Quantum__Canon__RAll0__body(double %12, %Array* %auxiliary) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 1) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Array*, %Array* }* + %15 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %14, i32 0, i32 0 + %16 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %14, i32 0, i32 1 + store %Array* %auxiliary, %Array** %15, align 8 + store %Array* %system, %Array** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %3, %Tuple* %13, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 -1) + 
call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyQuantumWalkByQubitization____adj({ { %Callable* }* }* %blockEncoding, %Array* %auxiliary, %Array* %system) { +entry: + %0 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %blockEncoding, i32 0, i32 0 + %1 = load { %Callable* }*, { %Callable* }** %0, align 8 + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + %3 = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 1) + %4 = bitcast { %Callable* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = bitcast { { %Callable* }* }* %blockEncoding to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 1) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %3, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %auxiliary, %Array** %9, align 8 + store %Array* %system, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + %11 = call double @Microsoft__Quantum__Math__PI__body() + call void @Microsoft__Quantum__Canon__RAll0__adj(double %11, %Array* %auxiliary) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %13 = bitcast i8* %12 to i2* + store i2 0, i2* %13, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %14 = call double @Microsoft__Quantum__Math__PI__body() + %theta = fmul double -5.000000e-01, %14 + %15 = call %Qubit* @Microsoft__Quantum__Arrays___af5d1f5b3fc545fd94571101b9dee3d5_Head__body(%Array* %system) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %17 = bitcast i8* %16 to %Qubit** + store %Qubit* %15, %Qubit** %17, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyQuantumWalkByQubitization____ctl(%Array* %__controlQubits__, { { { %Callable* }* }*, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %0, i32 0, i32 0 + %blockEncoding = load { { %Callable* }* }*, { { %Callable* }* }** %1, align 8 + %2 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %blockEncoding, i32 0, i32 0 + %3 = load { %Callable* }*, { %Callable* }** %2, align 8 + %4 = getelementptr inbounds { %Callable* }, { %Callable* }* %3, i32 0, i32 0 + %5 = load %Callable*, %Callable** %4, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %5, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %5, i32 1) + %6 = bitcast { %Callable* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = bitcast { { %Callable* }* }* %blockEncoding to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %0, i32 0, i32 1 + %auxiliary = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 1) + %9 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %0, i32 0, i32 2 + %system = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %11 = bitcast i8* %10 to i2* + store i2 0, i2* %11, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %12 = call double @Microsoft__Quantum__Math__PI__body() + %theta = fmul double -5.000000e-01, %12 + %13 = call %Qubit* @Microsoft__Quantum__Arrays___af5d1f5b3fc545fd94571101b9dee3d5_Head__body(%Array* %system) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %15 = bitcast i8* %14 to %Qubit** + store %Qubit* %13, %Qubit** %15, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* 
%paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, double, %Array* }* + %18 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %17, i32 0, i32 2 + store %Array* %paulis, %Array** %18, align 8 + store double %theta, double* %19, align 8 + store %Array* %qubits, %Array** %20, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %17) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + %21 = call double @Microsoft__Quantum__Math__PI__body() + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 1) + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %23 = bitcast %Tuple* %22 to { double, %Array* }* + %24 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %23, i32 0, i32 0 + %25 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %23, i32 0, i32 1 + store double %21, double* %24, align 8 + store %Array* %auxiliary, %Array** %25, align 8 + call void @Microsoft__Quantum__Canon__RAll0__ctl(%Array* %__controlQubits__, { double, %Array* }* %23) + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %5, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 1) + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, %Array* }* + %29 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %28, i32 0, i32 1 + store %Array* %auxiliary, %Array** %29, align 8 + store %Array* %system, %Array** %30, align 8 + %31 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %32 = bitcast %Tuple* %31 to { %Array*, { %Array*, 
%Array* }* }* + %33 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %32, i32 0, i32 0 + %34 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %32, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %33, align 8 + store { %Array*, %Array* }* %28, { %Array*, %Array* }** %34, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %31, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %5, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyQuantumWalkByQubitization____ctladj(%Array* %__controlQubits__, { { { %Callable* }* }*, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %0, i32 0, i32 0 + %blockEncoding = load { { %Callable* }* }*, { { %Callable* }* }** %1, align 8 + %2 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %blockEncoding, i32 0, i32 0 + %3 = load { %Callable* }*, { %Callable* }** %2, align 8 + %4 = getelementptr inbounds { %Callable* }, { %Callable* }* %3, i32 0, i32 0 + %5 = load %Callable*, %Callable** %4, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %5, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %5, i32 1) + %6 = bitcast { %Callable* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = bitcast { { %Callable* }* }* %blockEncoding to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %0, i32 0, i32 1 + %auxiliary = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 1) + %9 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %0, i32 0, i32 2 + %system = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 1) + %10 = call %Callable* 
@__quantum__rt__callable_copy(%Callable* %5, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %10) + call void @__quantum__rt__callable_make_controlled(%Callable* %10) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Array*, %Array* }* + %13 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 1 + store %Array* %auxiliary, %Array** %13, align 8 + store %Array* %system, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { %Array*, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %16, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %17, align 8 + store { %Array*, %Array* }* %12, { %Array*, %Array* }** %18, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %10, %Tuple* %15, %Tuple* null) + %19 = call double @Microsoft__Quantum__Math__PI__body() + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Array* }* + %22 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %21, i32 0, i32 1 + store double %19, double* %22, align 8 + store %Array* %auxiliary, %Array** %23, align 8 + call void @Microsoft__Quantum__Canon__RAll0__ctladj(%Array* %__controlQubits__, { double, %Array* }* %21) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %25 = bitcast i8* %24 to i2* + store i2 0, i2* %25, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %26 = call double @Microsoft__Quantum__Math__PI__body() + %theta = fmul double -5.000000e-01, %26 + %27 = call %Qubit* @Microsoft__Quantum__Arrays___af5d1f5b3fc545fd94571101b9dee3d5_Head__body(%Array* %system) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %29 = bitcast i8* %28 to %Qubit** + store %Qubit* %27, %Qubit** %29, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* 
%qubits, i32 1) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %31 = bitcast %Tuple* %30 to { %Array*, double, %Array* }* + %32 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %31, i32 0, i32 0 + %33 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %31, i32 0, i32 1 + %34 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %31, i32 0, i32 2 + store %Array* %paulis, %Array** %32, align 8 + store double %theta, double* %33, align 8 + store %Array* %qubits, %Array** %34, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %31) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %5, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %auxiliary, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxiliary, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__56__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %2 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* 
}*, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { { %Array*, %Array* }*, %Array* }*, %Array* }* + %8 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %7, i32 0, i32 1 + store { { %Array*, %Array* }*, %Array* }* %2, { { %Array*, %Array* }*, %Array* }** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__56__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %2 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { { %Array*, %Array* }*, %Array* }*, %Array* }* + %8 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %7, i32 0, i32 1 + store { { %Array*, %Array* }*, %Array* }* %2, { { %Array*, %Array* }*, %Array* }** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__56__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + 
%2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 1 + %7 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { { %Array*, %Array* }*, %Array* }*, %Array* }* + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %9, i32 0, i32 1 + store { { %Array*, %Array* }*, %Array* }* %7, { { %Array*, %Array* }*, %Array* }** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { { { %Array*, %Array* }*, %Array* }*, %Array* }* %9, { { { %Array*, %Array* }*, %Array* }*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__56__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast 
%Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 1 + %7 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { { %Array*, %Array* }*, %Array* }*, %Array* }* + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %9, i32 0, i32 1 + store { { %Array*, %Array* }*, %Array* }* %7, { { %Array*, %Array* }*, %Array* }** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { { { %Array*, %Array* }*, %Array* }*, %Array* }* %9, { { { %Array*, %Array* }*, %Array* }*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyPauliLCUUnitary____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Array*, %Array* }*, %Array* }*, %Array* }* + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = load { { %Array*, %Array* }*, %Array* }*, { { 
%Array*, %Array* }*, %Array* }** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef3__ApplyPauliLCUUnitary____body({ { %Array*, %Array* }*, %Array* }* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyPauliLCUUnitary____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Array*, %Array* }*, %Array* }*, %Array* }* + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, %Array* }, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef3__ApplyPauliLCUUnitary____adj({ { %Array*, %Array* }*, %Array* }* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyPauliLCUUnitary____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Array*, %Array* }*, %Array* }*, %Array* }*, { { { %Array*, %Array* }*, %Array* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef3__ApplyPauliLCUUnitary____ctl(%Array* %3, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyPauliLCUUnitary____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Array*, %Array* }*, %Array* }*, %Array* }*, { { { %Array*, %Array* }*, %Array* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef3__ApplyPauliLCUUnitary____ctladj(%Array* %3, { { { %Array*, %Array* }*, %Array* }*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__32__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change)
+  %3 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1
+  %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %3, align 8
+  %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 0
+  %6 = load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8
+  %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0
+  %8 = load %Array*, %Array** %7, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 %count-change)
+  %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1
+  %10 = load %Array*, %Array** %9, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 %count-change)
+  %11 = bitcast { %Array*, %Array* }* %6 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 %count-change)
+  %12 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 1
+  %13 = load %Array*, %Array** %12, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 %count-change)
+  %14 = bitcast { { %Array*, %Array* }*, %Array* }* %4 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change)
+  ret void
+}
+
+define internal void @MemoryManagement__32__AliasCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }*
+  %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0
+  %2 = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change)
+  %3 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1
+  %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %3, align 8
+  %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 0
+  %6 = load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8
+  %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0
+  %8 = load %Array*, %Array** %7, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 %count-change)
+  %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1
+  %10 = load %Array*, %Array** %9, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 %count-change)
+  %11 = bitcast { %Array*, %Array* }* %6 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 %count-change)
+  %12 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 1
+  %13 = load %Array*, %Array** %12, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 %count-change)
+  %14 = bitcast { { %Array*, %Array* }*, %Array* }* %4 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change)
+  ret void
+}
+
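+; NOTE: the simulation body below computes nTimeSlices = Ceiling(maxTime / trotterStepSize),
+; rescales the step to maxTime / nTimeSlices, and applies one Trotter step per slice.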
+define internal void @Microsoft__Quantum__Simulation____QsRef3__TrotterSimulationAlgorithmImpl____body(double %trotterStepSize, i64 %trotterOrder, double %maxTime, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, %Array* %qubits) {
+entry:
+  %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0
+  %1 = load { %Callable* }*, { %Callable* }** %0, align 8
+  %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0
+  %3 = load %Callable*, %Callable** %2, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 1)
+  %4 = bitcast { %Callable* }* %1 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1)
+  %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1
+  %6 = load { i64, %Callable* }*, { i64, %Callable* }** %5, align 8
+  %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1
+  %8 = load %Callable*, %Callable** %7, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1)
+  %9 = bitcast { i64, %Callable* }* %6 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1)
+  %10 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  %11 = fdiv double %maxTime, %trotterStepSize
+  %nTimeSlices = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %11)
+  %12 = sitofp i64 %nTimeSlices to double
+  %resizedTrotterStepSize = fdiv double %maxTime, %12
+  %13 = sub i64 %nTimeSlices, 1
+  br label %header__1
+
+header__1:                                        ; preds = %exiting__1, %entry
+  %idxTimeSlice = phi i64 [ 0, %entry ], [ %19, %exiting__1 ]
+  %14 = icmp sle i64 %idxTimeSlice, %13
+  br i1 %14, label %body__1, label %exit__1
+
+body__1:                                          ; preds = %header__1
+  %15 = call %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %resizedTrotterStepSize)
+  %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64))
+  %17 = bitcast %Tuple* %16 to { %Array* }*
+  %18 = getelementptr inbounds { %Array* }, { %Array* }* %17, i32 0, i32 0
+  store %Array* %qubits, %Array** %18, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %16, %Tuple* null)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1)
+  br label %exiting__1
+
+exiting__1:                                       ; preds = %body__1
+  %19 = add i64 %idxTimeSlice, 1
+  br label %header__1
+
+exit__1:                                          ; preds = %header__1
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  ret void
+}
+
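+; NOTE: TrotterStep partially applies TrotterStepImpl to the evolution generator, pairs the
+; result with the term count as a (nTerms, op) tuple, and feeds that tuple to
+; DecomposedIntoTimeStepsCA to obtain a single-step callable of the requested order.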
+define internal %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %trotterStepSize) {
+entry:
+  %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0
+  %evolutionSet = load { %Callable* }*, { %Callable* }** %0, align 8
+  %1 = getelementptr inbounds { %Callable* }, { %Callable* }* %evolutionSet, i32 0, i32 0
+  %2 = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1)
+  %3 = bitcast { %Callable* }* %evolutionSet to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1)
+  %4 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1
+  %generatorSystem = load { i64, %Callable* }*, { i64, %Callable* }** %4, align 8
+  %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1
+  %generatorSystemFunction = load %Callable*, %Callable** %5, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1)
+  %6 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1)
+  %7 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1)
+  %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0
+  %nTerms = load i64, i64* %8, align 4
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1)
+  %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation____QsRef3__TrotterStepImpl____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorSystemFunction, i32 1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorSystemFunction, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1)
+  %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* getelementptr ({ %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* null, i32 1) to i64))
+  %11 = bitcast %Tuple* %10 to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }*
+  %12 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %11, i32 0, i32 0
+  %13 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %11, i32 0, i32 1
+  store %Callable* %9, %Callable** %12, align 8
+  store { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, { { %Callable* }*, { i64, %Callable* }* }** %13, align 8
+  %14 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__60__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__36__FunctionTable, %Tuple* %10)
+  %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64))
+  %trotterForm = bitcast %Tuple* %15 to { i64, %Callable* }*
+  %16 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %trotterForm, i32 0, i32 0
+  %17 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %trotterForm, i32 0, i32 1
+  store i64 %nTerms, i64* %16, align 4
+  store %Callable* %14, %Callable** %17, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %14, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %14, i32 1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1)
+  %18 = call %Callable* @Microsoft__Quantum__Canon___4535c5767a504f01baac901c4cee390f_DecomposedIntoTimeStepsCA__body({ i64, %Callable* }* %trotterForm, i64 %trotterOrder)
+  %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64))
+  %20 = bitcast %Tuple* %19 to { %Callable*, double }*
+  %21 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %20, i32 0, i32 0
+  %22 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %20, i32 0, i32 1
+  store %Callable* %18, %Callable** %21, align 8
+  store double %trotterStepSize, double* %22, align 8
+  %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__61__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__15__FunctionTable, %Tuple* %19)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %14, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %14, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1)
+  ret %Callable* %23
+}
+
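+; NOTE: the __adj, __ctl and __ctladj specializations that follow repeat the body logic,
+; iterating the slices in reverse via a stride -1 %Range and/or rewriting the inner callable
+; with __quantum__rt__callable_make_adjoint / __quantum__rt__callable_make_controlled.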
+define internal void @Microsoft__Quantum__Simulation____QsRef3__TrotterSimulationAlgorithmImpl____adj(double %trotterStepSize, i64 %trotterOrder, double %maxTime, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, %Array* %qubits) {
+entry:
+  %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0
+  %1 = load { %Callable* }*, { %Callable* }** %0, align 8
+  %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0
+  %3 = load %Callable*, %Callable** %2, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 1)
+  %4 = bitcast { %Callable* }* %1 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1)
+  %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1
+  %6 = load { i64, %Callable* }*, { i64, %Callable* }** %5, align 8
+  %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1
+  %8 = load %Callable*, %Callable** %7, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1)
+  %9 = bitcast { i64, %Callable* }* %6 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1)
+  %10 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  %11 = fdiv double %maxTime, %trotterStepSize
+  %__qsVar0__nTimeSlices__ = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %11)
+  %12 = sitofp i64 %__qsVar0__nTimeSlices__ to double
+  %__qsVar1__resizedTrotterStepSize__ = fdiv double %maxTime, %12
+  %13 = sub i64 %__qsVar0__nTimeSlices__, 1
+  %14 = sub i64 %13, 0
+  %15 = sdiv i64 %14, 1
+  %16 = mul i64 1, %15
+  %17 = add i64 0, %16
+  %18 = insertvalue %Range zeroinitializer, i64 %17, 0
+  %19 = insertvalue %Range %18, i64 -1, 1
+  %20 = insertvalue %Range %19, i64 0, 2
+  %21 = extractvalue %Range %20, 0
+  %22 = extractvalue %Range %20, 1
+  %23 = extractvalue %Range %20, 2
+  br label %preheader__1
+
+preheader__1:                                     ; preds = %entry
+  %24 = icmp sgt i64 %22, 0
+  br label %header__1
+
+header__1:                                        ; preds = %exiting__1, %preheader__1
+  %__qsVar2__idxTimeSlice__ = phi i64 [ %21, %preheader__1 ], [ %33, %exiting__1 ]
+  %25 = icmp sle i64 %__qsVar2__idxTimeSlice__, %23
+  %26 = icmp sge i64 %__qsVar2__idxTimeSlice__, %23
+  %27 = select i1 %24, i1 %25, i1 %26
+  br i1 %27, label %body__1, label %exit__1
+
+body__1:                                          ; preds = %header__1
+  %28 = call %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %__qsVar1__resizedTrotterStepSize__)
+  %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %29)
+  %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64))
+  %31 = bitcast %Tuple* %30 to { %Array* }*
+  %32 = getelementptr inbounds { %Array* }, { %Array* }* %31, i32 0, i32 0
+  store %Array* %qubits, %Array** %32, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %30, %Tuple* null)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %28, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1)
+  br label %exiting__1
+
+exiting__1:                                       ; preds = %body__1
+  %33 = add i64 %__qsVar2__idxTimeSlice__, %22
+  br label %header__1
+
+exit__1:                                          ; preds = %header__1
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Simulation____QsRef3__TrotterSimulationAlgorithmImpl____ctl(%Array* %__controlQubits__, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 0
+  %trotterStepSize = load double, double* %1, align 8
+  %2 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 1
+  %trotterOrder = load i64, i64* %2, align 4
+  %3 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 2
+  %maxTime = load double, double* %3, align 8
+  %4 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 3
+  %evolutionGenerator = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %4, align 8
+  %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0
+  %6 = load { %Callable* }*, { %Callable* }** %5, align 8
+  %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0
+  %8 = load %Callable*, %Callable** %7, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1)
+  %9 = bitcast { %Callable* }* %6 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1)
+  %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1
+  %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8
+  %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1
+  %13 = load %Callable*, %Callable** %12, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 1)
+  %14 = bitcast { i64, %Callable* }* %11 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1)
+  %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1)
+  %16 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 4
+  %qubits = load %Array*, %Array** %16, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  %17 = fdiv double %maxTime, %trotterStepSize
+  %nTimeSlices = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %17)
+  %18 = sitofp i64 %nTimeSlices to double
+  %resizedTrotterStepSize = fdiv double %maxTime, %18
+  %19 = sub i64 %nTimeSlices, 1
+  br label %header__1
+
+header__1:                                        ; preds = %exiting__1, %entry
+  %idxTimeSlice = phi i64 [ 0, %entry ], [ %27, %exiting__1 ]
+  %20 = icmp sle i64 %idxTimeSlice, %19
+  br i1 %20, label %body__1, label %exit__1
+
+body__1:                                          ; preds = %header__1
+  %21 = call %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %resizedTrotterStepSize)
+  %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %21, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %22)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1)
+  %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64))
+  %24 = bitcast %Tuple* %23 to { %Array*, %Array* }*
+  %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0
+  %26 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1
+  store %Array* %__controlQubits__, %Array** %25, align 8
+  store %Array* %qubits, %Array** %26, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %23, %Tuple* null)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1)
+  br label %exiting__1
+
+exiting__1:                                       ; preds = %body__1
+  %27 = add i64 %idxTimeSlice, 1
+  br label %header__1
+
+exit__1:                                          ; preds = %header__1
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Simulation____QsRef3__TrotterSimulationAlgorithmImpl____ctladj(%Array* %__controlQubits__, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 0
+  %trotterStepSize = load double, double* %1, align 8
+  %2 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 1
+  %trotterOrder = load i64, i64* %2, align 4
+  %3 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 2
+  %maxTime = load double, double* %3, align 8
+  %4 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 3
+  %evolutionGenerator = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %4, align 8
+  %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0
+  %6 = load { %Callable* }*, { %Callable* }** %5, align 8
+  %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0
+  %8 = load %Callable*, %Callable** %7, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1)
+  %9 = bitcast { %Callable* }* %6 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1)
+  %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1
+  %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8
+  %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1
+  %13 = load %Callable*, %Callable** %12, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 1)
+  %14 = bitcast { i64, %Callable* }* %11 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1)
+  %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1)
+  %16 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 4
+  %qubits = load %Array*, %Array** %16, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  %17 = fdiv double %maxTime, %trotterStepSize
+  %__qsVar0__nTimeSlices__ = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %17)
+  %18 = sitofp i64 %__qsVar0__nTimeSlices__ to double
+  %__qsVar1__resizedTrotterStepSize__ = fdiv double %maxTime, %18
+  %19 = sub i64 %__qsVar0__nTimeSlices__, 1
+  %20 = sub i64 %19, 0
+  %21 = sdiv i64 %20, 1
+  %22 = mul i64 1, %21
+  %23 = add i64 0, %22
+  %24 = insertvalue %Range zeroinitializer, i64 %23, 0
+  %25 = insertvalue %Range %24, i64 -1, 1
+  %26 = insertvalue %Range %25, i64 0, 2
+  %27 = extractvalue %Range %26, 0
+  %28 = extractvalue %Range %26, 1
+  %29 = extractvalue %Range %26, 2
+  br label %preheader__1
+
+preheader__1:                                     ; preds = %entry
+  %30 = icmp sgt i64 %28, 0
+  br label %header__1
+
+header__1:                                        ; preds = %exiting__1, %preheader__1
+  %__qsVar2__idxTimeSlice__ = phi i64 [ %27, %preheader__1 ], [ %40, %exiting__1 ]
+  %31 = icmp sle i64 %__qsVar2__idxTimeSlice__, %29
+  %32 = icmp sge i64 %__qsVar2__idxTimeSlice__, %29
+  %33 = select i1 %30, i1 %31, i1 %32
+  br i1 %33, label %body__1, label %exit__1
+
+body__1:                                          ; preds = %header__1
+  %34 = call %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %__qsVar1__resizedTrotterStepSize__)
+  %35 = call %Callable* @__quantum__rt__callable_copy(%Callable* %34, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %35, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %35)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %35)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1)
+  %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64))
+  %37 = bitcast %Tuple* %36 to { %Array*, %Array* }*
+  %38 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %37, i32 0, i32 0
+  %39 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %37, i32 0, i32 1
+  store %Array* %__controlQubits__, %Array** %38, align 8
+  store %Array* %qubits, %Array** %39, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %35, %Tuple* %36, %Tuple* null)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %34, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %34, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %35, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %35, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1)
+  br label %exiting__1
+
+exiting__1:                                       ; preds = %body__1
+  %40 = add i64 %__qsVar2__idxTimeSlice__, %28
+  br label %header__1
+
+exit__1:                                          ; preds = %header__1
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  ret void
+}
+
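+; NOTE: TrotterStepImpl resolves the idx-th generator term through the generator-system
+; callable, maps it to an evolution unitary via the evolution set, and invokes that
+; unitary with the (stepsize, qubits) tuple.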
+define internal void @Microsoft__Quantum__Simulation____QsRef3__TrotterStepImpl____body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %idx, double %stepsize, %Array* %qubits) {
+entry:
+  %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0
+  %evolutionSet = load { %Callable* }*, { %Callable* }** %0, align 8
+  %1 = getelementptr inbounds { %Callable* }, { %Callable* }* %evolutionSet, i32 0, i32 0
+  %2 = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1)
+  %3 = bitcast { %Callable* }* %evolutionSet to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1)
+  %4 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1
+  %generatorSystem = load { i64, %Callable* }*, { i64, %Callable* }** %4, align 8
+  %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1
+  %generatorSystemFunction = load %Callable*, %Callable** %5, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1)
+  %6 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1)
+  %7 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1)
+  %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0
+  %nTerms = load i64, i64* %8, align 4
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1)
+  %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64))
+  %10 = bitcast %Tuple* %9 to { i64 }*
+  %11 = getelementptr inbounds { i64 }, { i64 }* %10, i32 0, i32 0
+  store i64 %idx, i64* %11, align 4
+  %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64))
+  call void @__quantum__rt__callable_invoke(%Callable* %generatorSystemFunction, %Tuple* %9, %Tuple* %12)
+  %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }* }*
+  %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %13, i32 0, i32 0
+  %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %14, align 8
+  %15 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0
+  %16 = load { %Array*, %Array* }*, { %Array*, %Array* }** %15, align 8
+  %17 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %16, i32 0, i32 0
+  %18 = load %Array*, %Array** %17, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1)
+  %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %16, i32 0, i32 1
+  %20 = load %Array*, %Array** %19, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 1)
+  %21 = bitcast { %Array*, %Array* }* %16 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 1)
+  %22 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1
+  %23 = load %Array*, %Array** %22, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1)
+  %24 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 1)
+  %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }* }* getelementptr ({ { %Callable* }* }, { { %Callable* }* }* null, i32 1) to i64))
+  call void @__quantum__rt__callable_invoke(%Callable* %2, %Tuple* %24, %Tuple* %25)
+  %26 = bitcast %Tuple* %25 to { { %Callable* }* }*
+  %27 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %26, i32 0, i32 0
+  %28 = load { %Callable* }*, { %Callable* }** %27, align 8
+  %29 = getelementptr inbounds { %Callable* }, { %Callable* }* %28, i32 0, i32 0
+  %30 = load %Callable*, %Callable** %29, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1)
+  %31 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64))
+  %32 = bitcast %Tuple* %31 to { double, %Array* }*
+  %33 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %32, i32 0, i32 0
+  %34 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %32, i32 0, i32 1
+  store double %stepsize, double* %33, align 8
+  store %Array* %qubits, %Array** %34, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %30, %Tuple* %31, %Tuple* null)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 -1)
+  %35 = bitcast { %Callable* }* %28 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Simulation____QsRef3__TrotterStepImpl____adj({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %idx, double %stepsize, %Array* %qubits) {
+entry:
+  %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0
+  %__qsVar0__evolutionSet__ = load { %Callable* }*, { %Callable* }** %0, align 8
+  %1 = getelementptr inbounds { %Callable* }, { %Callable* }* %__qsVar0__evolutionSet__, i32 0, i32 0
+  %2 = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1)
+  %3 = bitcast { %Callable* }* %__qsVar0__evolutionSet__ to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1)
+  %4 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1
+  %__qsVar1__generatorSystem__ = load { i64, %Callable* }*, { i64, %Callable* }** %4, align 8
+  %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__qsVar1__generatorSystem__, i32 0, i32 1
+  %__qsVar3__generatorSystemFunction__ = load %Callable*, %Callable** %5, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1)
+  %6 = bitcast { i64, %Callable* }* %__qsVar1__generatorSystem__ to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1)
+  %7 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1)
+  %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__qsVar1__generatorSystem__, i32 0, i32 0
+  %__qsVar2__nTerms__ = load i64, i64* %8, align 4
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1)
+  %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64))
+  %10 = bitcast %Tuple* %9 to { i64 }*
+  %11 = getelementptr inbounds { i64 }, { i64 }* %10, i32 0, i32 0
+  store i64 %idx, i64* %11, align 4
+  %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64))
+  call void @__quantum__rt__callable_invoke(%Callable* %__qsVar3__generatorSystemFunction__, %Tuple* %9, %Tuple* %12)
+  %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }* }*
+  %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %13, i32 0, i32 0
+  %__qsVar4__generatorIndex__ = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %14, align 8
+  %15 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__, i32 0, i32 0
+  %16 = load { %Array*, %Array* }*, { %Array*, %Array* }** %15, align 8
+  %17 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %16, i32 0, i32 0
+  %18 = load %Array*, %Array** %17, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1)
+  %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %16, i32 0, i32 1
+  %20 = load %Array*, %Array** %19, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 1)
+  %21 = bitcast { %Array*, %Array* }* %16 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 1)
+  %22 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__, i32 0, i32 1
+  %23 = load %Array*, %Array** %22, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1)
+  %24 = bitcast { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__ to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 1)
+  %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }* }* getelementptr ({ { %Callable* }* }, { { %Callable* }* }* null, i32 1) to i64))
+  call void @__quantum__rt__callable_invoke(%Callable* %2, %Tuple* %24, %Tuple* %25)
+  %26 = bitcast %Tuple* %25 to { { %Callable* }* }*
+  %27 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %26, i32 0, i32 0
+  %28 = load { %Callable* }*, { %Callable* }** %27, align 8
+  %29 = getelementptr inbounds { %Callable* }, { %Callable* }* %28, i32 0, i32 0
+  %30 = load %Callable*, %Callable** %29, align 8
+  %31 = call %Callable* @__quantum__rt__callable_copy(%Callable* %30, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %31)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1)
+  %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64))
+  %33 = bitcast %Tuple* %32 to { double, %Array* }*
+  %34 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %33, i32 0, i32 0
+  %35 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %33, i32 0, i32 1
+  store double %stepsize, double* %34, align 8
+  store %Array* %qubits, %Array** %35, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %31, %Tuple* %32, %Tuple* null)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 -1)
+  %36 = bitcast { %Callable* }* %28 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Simulation____QsRef3__TrotterStepImpl____ctl(%Array* %__controlQubits__, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 0
+  %evolutionGenerator = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8
+  %2 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0
+  %evolutionSet = load { %Callable* }*, { %Callable* }** %2, align 8
+  %3 = getelementptr inbounds { %Callable* }, { %Callable* }* %evolutionSet, i32 0, i32 0
+  %4 = load %Callable*, %Callable** %3, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1)
+  %5 = bitcast { %Callable* }* %evolutionSet to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1)
+  %6 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1
+  %generatorSystem = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8
+  %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1
+  %generatorSystemFunction = load %Callable*, %Callable** %7, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1)
+  %8 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1)
+  %9 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1)
+  %10 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 1
+  %idx = load i64, i64* %10, align 4
+  %11 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 2
+  %stepsize = load double, double* %11, align 8
+  %12 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 3
+  %qubits = load %Array*, %Array** %12, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1)
+  %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0
+  %nTerms = load i64, i64* %13, align 4
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1)
+  %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64))
+  %15 = bitcast %Tuple* %14 to { i64 }*
+  %16 = getelementptr inbounds { i64 }, { i64 }* %15, i32 0, i32 0
+  store i64 %idx, i64* %16, align 4
+  %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64))
+  call void @__quantum__rt__callable_invoke(%Callable* %generatorSystemFunction, %Tuple* %14, %Tuple* %17)
+  %18 = bitcast %Tuple* %17 to { { { %Array*, %Array* }*, %Array* }* }*
+  %19 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %18, i32 0, i32 0
+  %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %19, align 8
+  %20 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0
+  %21 = load { %Array*, %Array* }*, { %Array*, %Array* }** %20, align 8
+  %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 0
+  %23 = load %Array*, %Array** %22, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1)
+  %24 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 1
+  %25 = load %Array*, %Array** %24, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 1)
+  %26 = bitcast { %Array*, %Array* }* %21 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 1)
+  %27 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1
+  %28 = load %Array*, %Array** %27, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1)
+  %29 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1)
+  %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }* }* getelementptr ({ { %Callable* }* }, { { %Callable* }* }* null, i32 1) to i64))
+  call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %29, %Tuple* %30)
+  %31 = bitcast %Tuple* %30 to { { %Callable* }* }*
+  %32 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %31, i32 0, i32 0
+  %33 = load { %Callable* }*, { %Callable* }** %32, align 8
+  %34 = getelementptr inbounds { %Callable* }, { %Callable* }* %33, i32 0, i32 0
+  %35 = load %Callable*, %Callable** %34, align 8
+  %36 = call %Callable* @__quantum__rt__callable_copy(%Callable* %35, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %36)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1)
+  %37 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64))
+  %38 = bitcast %Tuple* %37 to { double, %Array* }*
+  %39 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %38, i32 0, i32 0
+  %40 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %38, i32 0, i32 1
+  store double %stepsize, double* %39, align 8
+  store %Array* %qubits, %Array** %40, align 8
+  %41 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64))
+  %42 = bitcast %Tuple* %41 to { %Array*, { double, %Array* }* }*
+  %43 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %42, i32 0, i32 0
+  %44 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %42, i32 0, i32 1
+  store %Array* %__controlQubits__, %Array** %43, align 8
+  store { double, %Array* }* %38, { double, %Array* }** %44, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %36, %Tuple* %41, %Tuple* null)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %35, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %35, i32 -1)
+  %45 = bitcast { %Callable* }* %33 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Simulation____QsRef3__TrotterStepImpl____ctladj(%Array* %__controlQubits__, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 0
+  %evolutionGenerator = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8
+  %2 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0
+  %__qsVar0__evolutionSet__ = load { %Callable* }*, { %Callable* }** %2, align 8
+  %3 = getelementptr inbounds { %Callable* }, { %Callable* }* %__qsVar0__evolutionSet__, i32 0, i32 0
+  %4 = load %Callable*, %Callable** %3, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1)
+  %5 = bitcast { %Callable* }* %__qsVar0__evolutionSet__ to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1)
+  %6 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1
+  %__qsVar1__generatorSystem__ = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8
+  %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__qsVar1__generatorSystem__, i32 0, i32 1
+  %__qsVar3__generatorSystemFunction__ = load %Callable*, %Callable** %7, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1)
+  %8 = bitcast { i64, %Callable* }* %__qsVar1__generatorSystem__ to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1)
+  %9 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1)
+  %10 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 1
+  %idx = load i64, i64* %10, align 4
+  %11 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 2
+  %stepsize = load double, double* %11, align 8
+  %12 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 3
+  %qubits = load %Array*, %Array** %12, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1)
+  %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__qsVar1__generatorSystem__, i32 0, i32 0
+  %__qsVar2__nTerms__ = load i64, i64* %13, align 4
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1)
+  %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64))
+  %15 = bitcast %Tuple* %14 to { i64 }*
+  %16 = getelementptr inbounds { i64 }, { i64 }* %15, i32 0, i32 0
+  store i64 %idx, i64* %16, align 4
+  %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64))
+  call void @__quantum__rt__callable_invoke(%Callable* %__qsVar3__generatorSystemFunction__, %Tuple* %14, %Tuple* %17)
+  %18 = bitcast %Tuple* %17 to { { { %Array*, %Array* }*, %Array* }* }*
+  %19 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %18, i32 0, i32 0
+  %__qsVar4__generatorIndex__ = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %19, align 8
+  %20 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__, i32 0, i32 0
+  %21 = load { %Array*, %Array* }*, { %Array*, %Array* }** %20, align 8
+  %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 0
+  %23 = load %Array*, %Array** %22, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1)
+  %24 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 1
+  %25 = load %Array*, %Array** %24, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 1)
+  %26 = bitcast { %Array*, %Array* }* %21 to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 1)
+  %27 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__, i32 0, i32 1
+  %28 = load %Array*, %Array** %27, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1)
+  %29 = bitcast { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__ to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1)
+  %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }* }* getelementptr ({ { %Callable* }* }, { { %Callable* }* }* null, i32 1) to i64))
+  call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %29, %Tuple* %30)
+  %31 = bitcast %Tuple* %30 to { { %Callable* }* }*
+  %32 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %31, i32 0, i32 0
+  %33 = load { %Callable* }*, { %Callable* }** %32, align 8
+  %34 = getelementptr inbounds { %Callable* }, { %Callable* }* %33, i32 0, i32 0
+  %35 = load %Callable*, %Callable** %34, align 8
+  %36 = call %Callable* @__quantum__rt__callable_copy(%Callable* %35, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %36)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %36)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1)
+  %37 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64))
+  %38 = bitcast %Tuple* %37 to { double, %Array* }*
+  %39 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %38, i32 0, i32 0
+  %40 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %38, i32 0, i32 1
+  store double %stepsize, double* %39, align 8
+  store %Array* %qubits, %Array** %40, align 8
+  %41 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ 
%Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %42 = bitcast %Tuple* %41 to { %Array*, { double, %Array* }* }* + %43 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %42, i32 0, i32 0 + %44 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %42, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %43, align 8 + store { double, %Array* }* %38, { double, %Array* }** %44, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %36, %Tuple* %41, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %35, i32 -1) + %45 = bitcast { %Callable* }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 -1) + ret void +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Simulation__AddGeneratorSystems__body({ i64, %Callable* }* %generatorSystemA, { i64, %Callable* }* %generatorSystemB) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystemA, i32 0, i32 1 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { i64, %Callable* }* %generatorSystemA to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystemB, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + %5 = bitcast { i64, %Callable* }* %generatorSystemB to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %nTermsA = call i64 @Microsoft__Quantum__Simulation__GetGeneratorSystemNTerms__body({ i64, %Callable* }* %generatorSystemA) + %nTermsB = call i64 @Microsoft__Quantum__Simulation__GetGeneratorSystemNTerms__body({ i64, %Callable* }* %generatorSystemB) + %generatorIndexFunctionA = call %Callable* @Microsoft__Quantum__Simulation__GetGeneratorSystemFunction__body({ i64, %Callable* }* %generatorSystemA) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 1) + %generatorIndexFunctionB = call %Callable* @Microsoft__Quantum__Simulation__GetGeneratorSystemFunction__body({ i64, %Callable* }* %generatorSystemB) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 1) + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation____QsRef3___AddGeneratorSystems____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunctionB, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunctionB, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, i64, %Callable*, %Callable* }* getelementptr ({ %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Callable*, i64, i64, %Callable*, %Callable* }* + %9 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %8, i32 
0, i32 0 + %10 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %8, i32 0, i32 2 + %12 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %8, i32 0, i32 3 + %13 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %8, i32 0, i32 4 + store %Callable* %6, %Callable** %9, align 8 + store i64 %nTermsA, i64* %10, align 4 + store i64 %nTermsB, i64* %11, align 4 + store %Callable* %generatorIndexFunctionA, %Callable** %12, align 8 + store %Callable* %generatorIndexFunctionB, %Callable** %13, align 8 + %generatorIndexFunction = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__57__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__33__FunctionTable, %Tuple* %7) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + %14 = add i64 %nTermsA, %nTermsB + %15 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 %14, %Callable* %generatorIndexFunction) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunction, i32 -1) + ret { i64, %Callable* }* %15 +} + +define internal i64 @Microsoft__Quantum__Simulation__GetGeneratorSystemNTerms__body({ i64, %Callable* }* %generatorSystem) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %generatorIndexFunction = load %Callable*, %Callable** %0, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + %1 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %2, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + ret i64 %nTerms +} + +define internal %Callable* @Microsoft__Quantum__Simulation__GetGeneratorSystemFunction__body({ i64, %Callable* }* %generatorSystem) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %generatorIndexFunction = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + %1 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %2, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + ret %Callable* %generatorIndexFunction +} + +define internal void @Lifted__PartialApplication__57__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64 }* + %1 = getelementptr inbounds { i64 }, { i64 }* %0, i32 0, i32 0 + %2 = load i64, i64* %1, align 4 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable*, %Callable* }* + %4 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 1 + %5 = load i64, i64* %4, align 4 + %6 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* 
}* %3, i32 0, i32 2 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 3 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 4 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64, i64, %Callable*, %Callable* }* getelementptr ({ i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, i64, i64, %Callable*, %Callable* }* + %14 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 4 + store i64 %2, i64* %14, align 4 + store i64 %5, i64* %15, align 4 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Callable* %11, %Callable** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3___AddGeneratorSystems____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, i64, i64, %Callable*, %Callable* }* + %1 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 4 + %6 = load i64, i64* %1, align 4 + %7 = load i64, i64* %2, align 4 + %8 = load i64, i64* %3, align 4 + %9 = load %Callable*, %Callable** %4, align 8 + %10 = load %Callable*, %Callable** %5, align 8 + %11 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation____QsRef3___AddGeneratorSystems____body(i64 %6, i64 %7, i64 %8, %Callable* %9, %Callable* %10) + %12 = bitcast %Tuple* %result-tuple to { { { %Array*, %Array* }*, %Array* }* }* + %13 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %12, i32 0, i32 0 + store { { %Array*, %Array* 
}*, %Array* }* %11, { { %Array*, %Array* }*, %Array* }** %13, align 8 + ret void +} + +define internal void @MemoryManagement__33__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 3 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 4 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__33__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 3 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 4 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal double @Microsoft__Quantum__Simulation__AdiabaticStateEnergyUnitary__body(%Callable* %statePrepUnitary, %Callable* %adiabaticUnitary, %Callable* %qpeUnitary, %Callable* %phaseEstAlgorithm, %Array* %qubits) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePrepUnitary, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePrepUnitary, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %adiabaticUnitary, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %adiabaticUnitary, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %qpeUnitary, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qpeUnitary, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %phaseEstAlgorithm, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %phaseEstAlgorithm, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Array* }* + %2 = getelementptr inbounds { %Array* }, { %Array* }* %1, i32 0, i32 0 + store %Array* %qubits, %Array** %2, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %statePrepUnitary, %Tuple* %0, %Tuple* null) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array* }* + %5 = getelementptr inbounds { %Array* }, { %Array* }* %4, i32 0, i32 0 + store %Array* %qubits, %Array** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %adiabaticUnitary, %Tuple* %3, %Tuple* null) + %6 = call { %Callable* }* @Microsoft__Quantum__Oracles__OracleToDiscrete__body(%Callable* %qpeUnitary) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }*, %Array* }* getelementptr ({ { %Callable* }*, %Array* }, { { %Callable* }*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { { %Callable* }*, %Array* }* + %9 = getelementptr inbounds { { %Callable* }*, %Array* }, { { %Callable* }*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { { %Callable* }*, %Array* }, { { %Callable* }*, %Array* }* %8, i32 0, i32 1 + store { %Callable* }* %6, { %Callable* }** %9, align 8 + store %Array* %qubits, %Array** %10, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %phaseEstAlgorithm, %Tuple* %7, %Tuple* %11) + %12 = bitcast %Tuple* %11 to { double }* + %13 = getelementptr inbounds { double }, { double }* %12, i32 0, i32 0 + %phaseEst = load double, double* %13, align 8 + %14 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %15 = load %Callable*, %Callable** %14, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePrepUnitary, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePrepUnitary, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %adiabaticUnitary, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adiabaticUnitary, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %qpeUnitary, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qpeUnitary, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %phaseEstAlgorithm, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %phaseEstAlgorithm, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + %16 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret double %phaseEst +} + +define internal { %Callable* }* @Microsoft__Quantum__Oracles__OracleToDiscrete__body(%Callable* %blackBoxOracle) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %blackBoxOracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %blackBoxOracle, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___ae485b361e91482f9489aa577506f9b4___QsRef3__ApplyOperationRepeatedlyCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %blackBoxOracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %blackBoxOracle, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable* }, { %Callable*, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Callable* }* + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %2, i32 0, i32 1 + store %Callable* %0, %Callable** %3, align 8 + store %Callable* %blackBoxOracle, %Callable** %4, align 8 + %5 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__75__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__43__FunctionTable, %Tuple* %1) + %6 = call { %Callable* }* @Microsoft__Quantum__Oracles__DiscreteOracle__body(%Callable* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %blackBoxOracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %blackBoxOracle, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %5, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %5, i32 -1) + ret { %Callable* }* %6 +} + +define internal double @Microsoft__Quantum__Simulation__EstimateEnergyWithAdiabaticEvolution__body(i64 %nQubits, %Callable* %statePrepUnitary, %Callable* %adiabaticUnitary, %Callable* %qpeUnitary, %Callable* %phaseEstAlgorithm) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePrepUnitary, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePrepUnitary, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %adiabaticUnitary, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adiabaticUnitary, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %qpeUnitary, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qpeUnitary, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %phaseEstAlgorithm, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %phaseEstAlgorithm, i32 1) + %qubits = call %Array* @__quantum__rt__qubit_allocate_array(i64 %nQubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %phaseEst = call double @Microsoft__Quantum__Simulation__AdiabaticStateEnergyUnitary__body(%Callable* %statePrepUnitary, %Callable* %adiabaticUnitary, %Callable* %qpeUnitary, %Callable* %phaseEstAlgorithm, %Array* %qubits) + call void @Microsoft__Quantum__Intrinsic__ResetAll__body(%Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePrepUnitary, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePrepUnitary, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %adiabaticUnitary, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %adiabaticUnitary, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %qpeUnitary, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %qpeUnitary, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %phaseEstAlgorithm, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %phaseEstAlgorithm, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %qubits) + ret double %phaseEst +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__body(i64 %idxTerm) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to i64* + store i64 0, i64* %2, align 4 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to double* + store double 0.000000e+00, double* %5, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %0, %Array** %8, align 8 + store %Array* %3, %Array** %9, align 8 + %10 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 0) + %12 = bitcast i8* %11 to i64* + store i64 0, i64* %12, align 4 + %13 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %7, %Array* %10) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %13 +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Simulation__IdentityGeneratorSystem__body() { +entry: + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call { i64, 
%Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 0, %Callable* %0) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + ret { i64, %Callable* }* %1 +} + +define internal void @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64 }* + %1 = getelementptr inbounds { i64 }, { i64 }* %0, i32 0, i32 0 + %2 = load i64, i64* %1, align 4 + %3 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__body(i64 %2) + %4 = bitcast %Tuple* %result-tuple to { { { %Array*, %Array* }*, %Array* }* }* + %5 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %4, i32 0, i32 0 + store { { %Array*, %Array* }*, %Array* }* %3, { { %Array*, %Array* }*, %Array* }** %5, align 8 + ret void +} + +define internal i2 @Microsoft__Quantum__Simulation__IntToPauli__body(i64 %idx) { +entry: + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %1 = bitcast i8* %0 to i2* + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 1) + %3 = bitcast i8* %2 to i2* + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 2) + %5 = bitcast i8* %4 to i2* + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 3) + %7 = bitcast i8* %6 to i2* + store i2 0, i2* %1, align 1 + store i2 1, i2* %3, align 1 + store i2 -1, i2* %5, align 1 + store i2 -2, i2* %7, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %idx) + %9 = bitcast i8* %8 to i2* + %10 = load i2, i2* %9, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + ret i2 %10 +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__body(%Array* %3, { %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__adj(%Array* %3, { %Array* }* %4) + ret void +} + +define internal void 
@Microsoft__Quantum__Preparation__PrepareArbitraryStateD__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, { %Array* }* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, { %Array* }* }*, { %Array*, { %Array* }* }** %2, align 8 + call void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__ctl(%Array* %3, { %Array*, { %Array* }* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, { %Array* }* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, { %Array* }* }*, { %Array*, { %Array* }* }** %2, align 8 + call void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__ctladj(%Array* %3, { %Array*, { %Array* }* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__MultiplexerFromGenerator__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable* }* + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %3 = load i64, i64* %1, align 4 + %4 = load %Callable*, %Callable** %2, align 8 + %5 = call %Callable* @Microsoft__Quantum__Canon__MultiplexerFromGenerator__body(i64 %3, %Callable* %4) + %6 = bitcast %Tuple* %result-tuple to { %Callable* }* + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + store %Callable* %5, %Callable** %7, align 8 + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__body(%Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__body(double 0.000000e+00, %Array* %coefficients, { %Array* }* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__ctl(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* 
%0, i32 0, i32 0 + %coefficients = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %qubits = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array*, { %Array* }* }* + %8 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 1 + %10 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 2 + store double 0.000000e+00, double* %8, align 8 + store %Array* %coefficients, %Array** %9, align 8 + store { %Array* }* %qubits, { %Array* }** %10, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %7) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__ctladj(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %coefficients = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %qubits = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, 
i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array*, { %Array* }* }* + %8 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 1 + %10 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 2 + store double 0.000000e+00, double* %8, align 8 + store %Array* %coefficients, %Array** %9, align 8 + store { %Array* }* %qubits, { %Array* }** %10, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %7) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__58__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }* }* }, { %Callable*, { { %Callable* }* }* }* %0, i32 0, i32 1 + %2 = load { { %Callable* }* }*, { { %Callable* }* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }* }*, %Array*, %Array* }* getelementptr ({ { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { { %Callable* }* }*, %Array*, %Array* }* + %10 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %9, i32 0, i32 2 + store { { %Callable* }* }* %2, { { %Callable* }* }** %10, align 8 + store %Array* %5, %Array** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { { %Callable* }* }* }, { %Callable*, { { %Callable* }* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__58__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }* }* }, { %Callable*, { { %Callable* }* }* }* %0, i32 0, i32 1 + %2 = load { { %Callable* }* }*, { { %Callable* }* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }* }*, %Array*, %Array* }* getelementptr ({ { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { { %Callable* }* }*, %Array*, %Array* }* + %10 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %9, i32 0, i32 2 + store { { %Callable* }* }* %2, { { %Callable* }* }** %10, align 8 + store %Array* %5, %Array** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { { %Callable* }* }* }, { %Callable*, { { %Callable* }* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__58__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Callable* }* }* }, { %Callable*, { { %Callable* }* }* }* %5, i32 0, i32 1 + %7 = load { { %Callable* }* }*, { { %Callable* }* }** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 
+ %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }* }*, %Array*, %Array* }* getelementptr ({ { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { { %Callable* }* }*, %Array*, %Array* }* + %14 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %13, i32 0, i32 2 + store { { %Callable* }* }* %7, { { %Callable* }* }** %14, align 8 + store %Array* %9, %Array** %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }* getelementptr ({ %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }, { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }, { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }, { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { { %Callable* }* }*, %Array*, %Array* }* %13, { { { %Callable* }* }*, %Array*, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { { %Callable* }* }* }, { %Callable*, { { %Callable* }* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__58__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Callable* }* }* }, { %Callable*, { { %Callable* }* }* }* %5, i32 0, i32 1 + %7 = load { { %Callable* }* }*, { { %Callable* }* }** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %9 = load %Array*, %Array** %8, align 8 + %10 = 
getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }* }*, %Array*, %Array* }* getelementptr ({ { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { { %Callable* }* }*, %Array*, %Array* }* + %14 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %13, i32 0, i32 2 + store { { %Callable* }* }* %7, { { %Callable* }* }** %14, align 8 + store %Array* %9, %Array** %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }* getelementptr ({ %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }, { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }, { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }, { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { { %Callable* }* }*, %Array*, %Array* }* %13, { { { %Callable* }* }*, %Array*, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { { %Callable* }* }* }, { %Callable*, { { %Callable* }* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %23) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyQuantumWalkByQubitization____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Callable* }* }*, %Array*, %Array* }* + %1 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = load { { %Callable* }* }*, { { %Callable* }* }** %1, align 8 + %5 = load %Array*, %Array** %2, align 8 + %6 = load 
%Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Simulation____QsRef3__ApplyQuantumWalkByQubitization____body({ { %Callable* }* }* %4, %Array* %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyQuantumWalkByQubitization____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Callable* }* }*, %Array*, %Array* }* + %1 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Callable* }* }*, %Array*, %Array* }, { { { %Callable* }* }*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = load { { %Callable* }* }*, { { %Callable* }* }** %1, align 8 + %5 = load %Array*, %Array** %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Simulation____QsRef3__ApplyQuantumWalkByQubitization____adj({ { %Callable* }* }* %4, %Array* %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyQuantumWalkByQubitization____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }, { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }, { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Callable* }* }*, %Array*, %Array* }*, { { { %Callable* }* }*, %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef3__ApplyQuantumWalkByQubitization____ctl(%Array* %3, { { { %Callable* }* }*, %Array*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__ApplyQuantumWalkByQubitization____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }, { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }, { %Array*, { { { %Callable* }* }*, %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Callable* }* }*, %Array*, %Array* }*, { { { %Callable* }* }*, %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef3__ApplyQuantumWalkByQubitization____ctladj(%Array* %3, { { { %Callable* }* }*, %Array*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__34__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }* }* }, { %Callable*, { { %Callable* }* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { { %Callable* }* }* }, { %Callable*, { { %Callable* }* }* }* %0, i32 0, i32 1 + %4 = load { { %Callable* }* }*, { { %Callable* }* }** %3, align 8 + %5 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %4, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 %count-change) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change) + %10 = bitcast { { %Callable* }* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__34__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }* }* }, { %Callable*, { { %Callable* }* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { { %Callable* }* }* }, { %Callable*, { { %Callable* }* }* }* %0, i32 0, i32 1 + %4 = load { { %Callable* }* }*, { { %Callable* }* }** %3, align 8 + %5 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %4, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 %count-change) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change) + %10 = bitcast { { %Callable* }* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal { %Callable* }* @Microsoft__Quantum__Simulation__SimulationAlgorithm__body(%Callable* %__Item1__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable* }* + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + store %Callable* %__Item1__, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* 
%__Item1__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 -1) + ret { %Callable* }* %1 +} + +define internal void @Microsoft__Quantum__Simulation__AddGeneratorSystems__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, %Callable* }*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, %Callable* }*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %3 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %5 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__AddGeneratorSystems__body({ i64, %Callable* }* %3, { i64, %Callable* }* %4) + %6 = bitcast %Tuple* %result-tuple to { { i64, %Callable* }* }* + %7 = getelementptr inbounds { { i64, %Callable* }* }, { { i64, %Callable* }* }* %6, i32 0, i32 0 + store { i64, %Callable* }* %5, { i64, %Callable* }** %7, align 8 + ret void +} + +define internal void @Lifted__PartialApplication__59__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = bitcast %Tuple* %arg-tuple to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %6 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 0 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 1 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %8, align 8 + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 2 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %14 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* 
}* }*, %Array* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store i64 %4, i64* %15, align 4 + store double %7, double* %16, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %9, { { %Callable* }*, { i64, %Callable* }* }** %17, align 8 + store %Array* %11, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__59__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = bitcast %Tuple* %arg-tuple to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %6 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 0 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 1 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %8, align 8 + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 2 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %14 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* 
}, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store i64 %4, i64* %15, align 4 + store double %7, double* %16, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %9, { { %Callable* }*, { i64, %Callable* }* }** %17, align 8 + store %Array* %11, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__59__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %6 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 1 + %13 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %12, align 8 + %14 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 2 + %15 = load %Array*, %Array** %14, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, 
%Callable* }* }*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %18 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 3 + %22 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 4 + store double %7, double* %18, align 8 + store i64 %9, i64* %19, align 4 + store double %11, double* %20, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %13, { { %Callable* }*, { i64, %Callable* }* }** %21, align 8 + store %Array* %15, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* getelementptr ({ %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__59__ctladj__wrapper(%Tuple* 
%capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %6 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 1 + %13 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %12, align 8 + %14 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 2 + %15 = load %Array*, %Array** %14, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %18 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 3 + %22 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 4 + store double %7, double* %18, align 8 + store i64 %9, i64* %19, align 4 + store double %11, double* %20, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %13, { { %Callable* }*, { i64, 
%Callable* }* }** %21, align 8 + store %Array* %15, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* getelementptr ({ %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %29) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__TrotterSimulationAlgorithmImpl____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %1 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load i64, i64* %2, align 4 + %8 = 
load double, double* %3, align 8 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %4, align 8 + %10 = load %Array*, %Array** %5, align 8 + call void @Microsoft__Quantum__Simulation____QsRef3__TrotterSimulationAlgorithmImpl____body(double %6, i64 %7, double %8, { { %Callable* }*, { i64, %Callable* }* }* %9, %Array* %10) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__TrotterSimulationAlgorithmImpl____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %1 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load i64, i64* %2, align 4 + %8 = load double, double* %3, align 8 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %4, align 8 + %10 = load %Array*, %Array** %5, align 8 + call void @Microsoft__Quantum__Simulation____QsRef3__TrotterSimulationAlgorithmImpl____adj(double %6, i64 %7, double %8, { { %Callable* }*, { i64, %Callable* }* }* %9, %Array* %10) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__TrotterSimulationAlgorithmImpl____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef3__TrotterSimulationAlgorithmImpl____ctl(%Array* %3, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__TrotterSimulationAlgorithmImpl____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* 
%result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef3__TrotterSimulationAlgorithmImpl____ctladj(%Array* %3, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__35__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__35__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__60__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %2 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { i64, double, %Array* }* + %4 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 0 + %5 = load i64, i64* %4, align 4 + %6 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* getelementptr ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, 
%Callable* }* }*, i64, double, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %12 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 3 + store { { %Callable* }*, { i64, %Callable* }* }* %2, { { %Callable* }*, { i64, %Callable* }* }** %12, align 8 + store i64 %5, i64* %13, align 4 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__60__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %2 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { i64, double, %Array* }* + %4 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 0 + %5 = load i64, i64* %4, align 4 + %6 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* getelementptr ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %12 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 2 + %15 = 
getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 3 + store { { %Callable* }*, { i64, %Callable* }* }* %2, { { %Callable* }*, { i64, %Callable* }* }** %12, align 8 + store i64 %5, i64* %13, align 4 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__60__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, double, %Array* }*, { i64, double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1 + %7 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %6, align 8 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 0 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 1 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 2 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* getelementptr ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %16 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, 
double, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 3 + store { { %Callable* }*, { i64, %Callable* }* }* %7, { { %Callable* }*, { i64, %Callable* }* }** %16, align 8 + store i64 %9, i64* %17, align 4 + store double %11, double* %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* getelementptr ({ %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__60__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, double, %Array* }*, { i64, double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1 + %7 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %6, align 8 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 0 + %9 = load i64, i64* %8, 
align 4 + %10 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 1 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 2 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* getelementptr ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %16 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 3 + store { { %Callable* }*, { i64, %Callable* }* }* %7, { { %Callable* }*, { i64, %Callable* }* }** %16, align 8 + store i64 %9, i64* %17, align 4 + store double %11, double* %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* getelementptr ({ %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__TrotterStepImpl____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %1 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 3 + %5 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %6 = load i64, i64* %2, align 4 + %7 = load double, double* %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Simulation____QsRef3__TrotterStepImpl____body({ { %Callable* }*, { i64, %Callable* }* }* %5, i64 %6, double %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__TrotterStepImpl____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %1 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 3 + %5 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %6 = load i64, i64* %2, align 4 + %7 = load double, double* %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Simulation____QsRef3__TrotterStepImpl____adj({ { %Callable* }*, { i64, %Callable* }* }* %5, i64 %6, double %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__TrotterStepImpl____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { 
i64, %Callable* }* }*, i64, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef3__TrotterStepImpl____ctl(%Array* %3, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef3__TrotterStepImpl____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef3__TrotterStepImpl____ctladj(%Array* %3, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__36__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 %count-change) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8 + %12 = getelementptr inbounds { 
i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 %count-change) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__36__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 %count-change) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 %count-change) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__61__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* 
+ %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array* }* + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__61__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array* }* + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__61__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Array* }* + %10 = getelementptr inbounds { double, 
%Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Array* }* %9, { double, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__61__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Array* }* %9, { double, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** 
%16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation___cf163a106d11461497cf5b799872fa06___QsRef3__ApplyBlockEncodingByLCU____body(%Callable* %statePreparation, %Callable* %selector, { %Array* }* %auxiliary, %Array* %system) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePreparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePreparation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %selector, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %selector, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %auxiliary, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %auxiliary to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %selector, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %selector, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Callable*, %Array* }* + %5 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %4, i32 0, i32 1 + store %Callable* %selector, %Callable** %5, align 8 + store %Array* %system, %Array** %6, align 8 + %7 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__62__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__37__FunctionTable, %Tuple* %3) + call void @Microsoft__Quantum__Canon___0f498e1ece294d4db89965060edcd2ac_ApplyWithCA__body(%Callable* %statePreparation, %Callable* %7, { %Array* }* %auxiliary) + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePreparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePreparation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %selector, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %selector, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call 
void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__62__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { { %Array* }*, %Array* }* + %5 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %5, align 8 + store %Array* %2, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__62__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { { %Array* }*, %Array* }* + %5 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %5, align 8 + store %Array* %2, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Callable* @__quantum__rt__callable_copy(%Callable* %9, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %10) + call void @__quantum__rt__callable_invoke(%Callable* %10, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__62__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* 
%capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { %Array* }*, %Array* }* + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %9, i32 0, i32 1 + store { %Array* }* %4, { %Array* }** %10, align 8 + store %Array* %7, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { { %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { { %Array* }*, %Array* }* %9, { { %Array* }*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__62__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { %Array* }*, %Array* }* + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %9, i32 0, i32 1 + store { %Array* }* %4, { %Array* }** %10, align 8 + store %Array* %7, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { %Array* }*, 
%Array* }* }* getelementptr ({ %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { { %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { { %Array* }*, %Array* }* %9, { { %Array* }*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @MemoryManagement__37__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__37__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Simulation___cf163a106d11461497cf5b799872fa06___QsRef3__ApplyBlockEncodingByLCU____adj(%Callable* %statePreparation, %Callable* %selector, { %Array* }* %auxiliary, %Array* %system) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePreparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* 
%statePreparation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %selector, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %selector, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %auxiliary, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %auxiliary to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %selector, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %selector, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Callable*, %Array* }* + %5 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %4, i32 0, i32 1 + store %Callable* %selector, %Callable** %5, align 8 + store %Array* %system, %Array** %6, align 8 + %7 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__63__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__37__FunctionTable, %Tuple* %3) + call void @Microsoft__Quantum__Canon___0f498e1ece294d4db89965060edcd2ac_ApplyWithCA__adj(%Callable* %statePreparation, %Callable* %7, { %Array* }* %auxiliary) + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePreparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePreparation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %selector, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %selector, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__63__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { { %Array* }*, %Array* }* + %5 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %5, align 8 + store %Array* %2, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %9 = load 
%Callable*, %Callable** %8, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__63__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { { %Array* }*, %Array* }* + %5 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %5, align 8 + store %Array* %2, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Callable* @__quantum__rt__callable_copy(%Callable* %9, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %10) + call void @__quantum__rt__callable_invoke(%Callable* %10, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__63__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { %Array* }*, %Array* }* + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %9, i32 0, i32 1 + store { %Array* }* %4, { %Array* }** %10, align 8 + store %Array* %7, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { { %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %13, i32 0, i32 0 + 
%15 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { { %Array* }*, %Array* }* %9, { { %Array* }*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__63__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { %Array* }*, %Array* }* + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %9, i32 0, i32 1 + store { %Array* }* %4, { %Array* }** %10, align 8 + store %Array* %7, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { { %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { { %Array* }*, %Array* }* %9, { { %Array* }*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation___cf163a106d11461497cf5b799872fa06___QsRef3__ApplyBlockEncodingByLCU____ctl(%Array* %__controlQubits__, { %Callable*, %Callable*, { %Array* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %statePreparation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePreparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePreparation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %selector = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %selector, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %selector, i32 1) + %3 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %auxiliary = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %auxiliary, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array* }* %auxiliary to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %0, i32 0, i32 3 + %system = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %statePreparation, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %statePreparation, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %selector, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %selector, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Array* }* + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 1 + store %Callable* %selector, %Callable** %10, align 8 + store %Array* %system, %Array** %11, align 8 + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__64__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__37__FunctionTable, %Tuple* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + 
%13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, { %Array* }* }* getelementptr ({ %Callable*, %Callable*, { %Array* }* }, { %Callable*, %Callable*, { %Array* }* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Callable*, %Callable*, { %Array* }* }* + %15 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }* }, { %Callable*, %Callable*, { %Array* }* }* %14, i32 0, i32 0 + %16 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }* }, { %Callable*, %Callable*, { %Array* }* }* %14, i32 0, i32 1 + %17 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }* }, { %Callable*, %Callable*, { %Array* }* }* %14, i32 0, i32 2 + store %Callable* %statePreparation, %Callable** %15, align 8 + store %Callable* %12, %Callable** %16, align 8 + store { %Array* }* %auxiliary, { %Array* }** %17, align 8 + call void @Microsoft__Quantum__Canon___0f498e1ece294d4db89965060edcd2ac_ApplyWithCA__ctl(%Array* %__controlQubits__, { %Callable*, %Callable*, { %Array* }* }* %14) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePreparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePreparation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %selector, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %selector, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %statePreparation, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %statePreparation, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__64__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { { %Array* }*, %Array* }* + %5 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %5, align 8 + store %Array* %2, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* 
%3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__64__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { { %Array* }*, %Array* }* + %5 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %5, align 8 + store %Array* %2, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Callable* @__quantum__rt__callable_copy(%Callable* %9, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %10) + call void @__quantum__rt__callable_invoke(%Callable* %10, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__64__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { %Array* }*, %Array* }* + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %9, i32 0, i32 1 + store { %Array* }* %4, { %Array* }** %10, align 8 + store %Array* %7, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { { %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { { %Array* }*, 
%Array* }* %9, { { %Array* }*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__64__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { %Array* }*, %Array* }* + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %9, i32 0, i32 1 + store { %Array* }* %4, { %Array* }** %10, align 8 + store %Array* %7, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { { %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { { %Array* }*, %Array* }* %9, { { %Array* }*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation___cf163a106d11461497cf5b799872fa06___QsRef3__ApplyBlockEncodingByLCU____ctladj(%Array* %__controlQubits__, { %Callable*, %Callable*, { %Array* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %statePreparation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePreparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePreparation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %selector = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %selector, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %selector, i32 1) + %3 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %auxiliary = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %auxiliary, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array* }* %auxiliary to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %0, i32 0, i32 3 + %system = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %statePreparation, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %statePreparation, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %selector, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %selector, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %system, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Array* }* + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %9, i32 0, i32 1 + store %Callable* %selector, %Callable** %10, align 8 + store %Array* %system, %Array** %11, align 8 + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__65__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__37__FunctionTable, %Tuple* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, { %Array* }* }* getelementptr ({ %Callable*, %Callable*, { 
%Array* }* }, { %Callable*, %Callable*, { %Array* }* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Callable*, %Callable*, { %Array* }* }* + %15 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }* }, { %Callable*, %Callable*, { %Array* }* }* %14, i32 0, i32 0 + %16 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }* }, { %Callable*, %Callable*, { %Array* }* }* %14, i32 0, i32 1 + %17 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }* }, { %Callable*, %Callable*, { %Array* }* }* %14, i32 0, i32 2 + store %Callable* %statePreparation, %Callable** %15, align 8 + store %Callable* %12, %Callable** %16, align 8 + store { %Array* }* %auxiliary, { %Array* }** %17, align 8 + call void @Microsoft__Quantum__Canon___0f498e1ece294d4db89965060edcd2ac_ApplyWithCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Callable*, { %Array* }* }* %14) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %statePreparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %statePreparation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %selector, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %selector, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %system, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %statePreparation, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %statePreparation, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__65__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { { %Array* }*, %Array* }* + %5 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %5, align 8 + store %Array* %2, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__65__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, 
%Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { { %Array* }*, %Array* }* + %5 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %5, align 8 + store %Array* %2, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Callable* @__quantum__rt__callable_copy(%Callable* %9, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %10) + call void @__quantum__rt__callable_invoke(%Callable* %10, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__65__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { %Array* }*, %Array* }* + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %9, i32 0, i32 1 + store { %Array* }* %4, { %Array* }** %10, align 8 + store %Array* %7, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { { %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { { %Array* }*, %Array* }* %9, { { %Array* }*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* 
%5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__65__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { %Array* }*, %Array* }* + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %9, i32 0, i32 1 + store { %Array* }* %4, { %Array* }** %10, align 8 + store %Array* %7, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { { %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { { %Array* }*, %Array* }* %9, { { %Array* }*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define 
internal void @Lifted__PartialApplication__66__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { { %Array* }*, %Array* }* + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %5, i32 0, i32 0 + %7 = load { %Array* }*, { %Array* }** %6, align 8 + %8 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %5, i32 0, i32 1 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, { %Array* }*, %Array* }* getelementptr ({ %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Callable*, %Callable*, { %Array* }*, %Array* }* + %12 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %11, i32 0, i32 3 + store %Callable* %2, %Callable** %12, align 8 + store %Callable* %4, %Callable** %13, align 8 + store { %Array* }* %7, { %Array* }** %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__66__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { { %Array* }*, %Array* }* + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %5, i32 0, i32 0 + %7 = load { %Array* }*, { %Array* }** %6, align 8 + %8 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %5, i32 0, i32 1 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, { %Array* }*, %Array* }* getelementptr ({ %Callable*, %Callable*, { %Array* }*, %Array* }, { 
%Callable*, %Callable*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Callable*, %Callable*, { %Array* }*, %Array* }* + %12 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %11, i32 0, i32 3 + store %Callable* %2, %Callable** %12, align 8 + store %Callable* %4, %Callable** %13, align 8 + store { %Array* }* %7, { %Array* }** %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__66__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { %Array* }*, %Array* }*, { { %Array* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, { %Array* }*, %Array* }* getelementptr ({ %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Callable*, %Callable*, { %Array* }*, %Array* }* + %16 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr 
inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %15, i32 0, i32 3 + store %Callable* %7, %Callable** %16, align 8 + store %Callable* %9, %Callable** %17, align 8 + store { %Array* }* %11, { %Array* }** %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }, { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }, { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }, { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { %Callable*, %Callable*, { %Array* }*, %Array* }* %15, { %Callable*, %Callable*, { %Array* }*, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__66__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { %Array* }*, %Array* }*, { { %Array* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %11 = load { %Array* }*, 
{ %Array* }** %10, align 8 + %12 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, { %Array* }*, %Array* }* getelementptr ({ %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Callable*, %Callable*, { %Array* }*, %Array* }* + %16 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %15, i32 0, i32 3 + store %Callable* %7, %Callable** %16, align 8 + store %Callable* %9, %Callable** %17, align 8 + store { %Array* }* %11, { %Array* }** %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }, { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }, { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }, { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { %Callable*, %Callable*, { %Array* }*, %Array* }* %15, { %Callable*, %Callable*, { %Array* }*, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation___cf163a106d11461497cf5b799872fa06___QsRef3__ApplyBlockEncodingByLCU____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, %Callable*, { %Array* }*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { 
%Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %0, i32 0, i32 3 + %5 = load %Callable*, %Callable** %1, align 8 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load { %Array* }*, { %Array* }** %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Simulation___cf163a106d11461497cf5b799872fa06___QsRef3__ApplyBlockEncodingByLCU____body(%Callable* %5, %Callable* %6, { %Array* }* %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Simulation___cf163a106d11461497cf5b799872fa06___QsRef3__ApplyBlockEncodingByLCU____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, %Callable*, { %Array* }*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { %Callable*, %Callable*, { %Array* }*, %Array* }, { %Callable*, %Callable*, { %Array* }*, %Array* }* %0, i32 0, i32 3 + %5 = load %Callable*, %Callable** %1, align 8 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load { %Array* }*, { %Array* }** %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Simulation___cf163a106d11461497cf5b799872fa06___QsRef3__ApplyBlockEncodingByLCU____adj(%Callable* %5, %Callable* %6, { %Array* }* %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Simulation___cf163a106d11461497cf5b799872fa06___QsRef3__ApplyBlockEncodingByLCU____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }, { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }, { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Callable*, %Callable*, { %Array* }*, %Array* }*, { %Callable*, %Callable*, { %Array* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation___cf163a106d11461497cf5b799872fa06___QsRef3__ApplyBlockEncodingByLCU____ctl(%Array* %3, { %Callable*, %Callable*, { %Array* }*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Simulation___cf163a106d11461497cf5b799872fa06___QsRef3__ApplyBlockEncodingByLCU____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds 
{ %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }, { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }, { %Array*, { %Callable*, %Callable*, { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Callable*, %Callable*, { %Array* }*, %Array* }*, { %Callable*, %Callable*, { %Array* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation___cf163a106d11461497cf5b799872fa06___QsRef3__ApplyBlockEncodingByLCU____ctladj(%Array* %3, { %Callable*, %Callable*, { %Array* }*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__38__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__38__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void 
@Microsoft__Quantum__Preparation____QsRef3__ApplyGlobalRotationStep____body(double %angle, i64 %idxTarget, %Array* %register) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 0, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %3 = bitcast i8* %2 to %Qubit** + %4 = load %Qubit*, %Qubit** %3, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %6 = bitcast i8* %5 to %Qubit** + store %Qubit* %4, %Qubit** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %angle, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyGlobalRotationStep____adj(double %angle, i64 %idxTarget, %Array* %register) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 0, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %3 = bitcast i8* %2 to %Qubit** + %4 = load %Qubit*, %Qubit** %3, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %6 = bitcast i8* %5 to %Qubit** + store %Qubit* %4, %Qubit** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %angle, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyGlobalRotationStep____ctl(%Array* %__controlQubits__, { double, i64, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 0 + %angle = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { double, 
i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2 + %register = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %5 = bitcast i8* %4 to i2* + store i2 0, i2* %5, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %10 = bitcast i8* %9 to %Qubit** + store %Qubit* %8, %Qubit** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Array*, double, %Array* }* + %13 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 2 + store %Array* %paulis, %Array** %13, align 8 + store double %angle, double* %14, align 8 + store %Array* %qubits, %Array** %15, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %12) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyGlobalRotationStep____ctladj(%Array* %__controlQubits__, { double, i64, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 0 + %angle = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2 + %register = load %Array*, %Array** %3, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %5 = bitcast i8* %4 to i2* + store i2 0, i2* %5, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %10 = bitcast i8* %9 to %Qubit** + store %Qubit* %8, %Qubit** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Array*, double, %Array* }* + %13 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 2 + store %Array* %paulis, %Array** %13, align 8 + store double %angle, double* %14, align 8 + store %Array* %qubits, %Array** %15, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %12) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____body(double %tolerance, %Array* %disentangling, i2 %axis, { %Range, i64 }* %0, %Array* %register) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %1 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 0 + %rngControl = load %Range, %Range* %1, align 4 + %2 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %2, align 4 + %3 = extractvalue %Range %rngControl, 0 + %4 = extractvalue %Range %rngControl, 1 + %5 = extractvalue %Range %rngControl, 2 + %6 = insertvalue %Range zeroinitializer, i64 %3, 0 + %7 = insertvalue %Range 
%6, i64 %4, 1 + %8 = insertvalue %Range %7, i64 %5, 2 + %9 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %8, i1 true) + %actualControl = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %9) + %10 = getelementptr inbounds { %Array* }, { %Array* }* %actualControl, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { %Array* }* %actualControl to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %14 = bitcast i8* %13 to %Qubit** + %15 = load %Qubit*, %Qubit** %14, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body(double %tolerance, %Array* %disentangling, i2 %axis, { %Array* }* %actualControl, %Qubit* %15) + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____adj(double %tolerance, %Array* %disentangling, i2 %axis, { %Range, i64 }* %0, %Array* %register) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %1 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 0 + %rngControl = load %Range, %Range* %1, align 4 + %2 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %2, align 4 + %3 = extractvalue %Range %rngControl, 0 + %4 = extractvalue %Range %rngControl, 1 + %5 = extractvalue %Range %rngControl, 2 + %6 = insertvalue %Range zeroinitializer, i64 %3, 0 + %7 = insertvalue %Range %6, i64 %4, 1 + %8 = insertvalue %Range %7, i64 %5, 2 + %9 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %8, i1 true) + %__qsVar0__actualControl__ = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %9) + %10 = getelementptr inbounds { %Array* }, { %Array* }* %__qsVar0__actualControl__, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { %Array* }* %__qsVar0__actualControl__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %14 = bitcast i8* %13 to %Qubit** + %15 = load %Qubit*, %Qubit** %14, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj(double %tolerance, %Array* %disentangling, i2 %axis, { %Array* }* %__qsVar0__actualControl__, %Qubit* %15) + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 -1) + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____ctl(%Array* %__controlQubits__, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1 + %disentangling = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1) + %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2 + %axis = load i2, i2* %3, align 1 + %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3 + %5 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8 + %6 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4 + %register = load %Array*, %Array** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %7 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 0 + %rngControl = load %Range, %Range* %7, align 4 + %8 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 1 + %idxTarget = load i64, i64* %8, align 4 + %9 = extractvalue %Range %rngControl, 0 + %10 = extractvalue %Range %rngControl, 1 + %11 = extractvalue %Range %rngControl, 2 + %12 = insertvalue %Range zeroinitializer, i64 %9, 0 + %13 = insertvalue %Range %12, i64 %10, 1 + %14 = insertvalue %Range %13, i64 %11, 2 + %15 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %14, i1 true) + %actualControl = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %15) + %16 = getelementptr inbounds { %Array* }, { %Array* }* %actualControl, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array* }* %actualControl to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 1) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %20 = bitcast i8* %19 to %Qubit** + %21 = load %Qubit*, %Qubit** %20, align 8 + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %23 = bitcast %Tuple* %22 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %24 = 
getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 0 + %25 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 1 + %26 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 2 + %27 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 3 + %28 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 4 + store double %tolerance, double* %24, align 8 + store %Array* %disentangling, %Array** %25, align 8 + store i2 %axis, i2* %26, align 1 + store { %Array* }* %actualControl, { %Array* }** %27, align 8 + store %Qubit* %21, %Qubit** %28, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____ctladj(%Array* %__controlQubits__, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1 + %disentangling = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1) + %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2 + %axis = load i2, i2* %3, align 1 + %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3 + %5 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8 + %6 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4 + %register = load %Array*, %Array** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %7 = getelementptr 
inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 0 + %rngControl = load %Range, %Range* %7, align 4 + %8 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 1 + %idxTarget = load i64, i64* %8, align 4 + %9 = extractvalue %Range %rngControl, 0 + %10 = extractvalue %Range %rngControl, 1 + %11 = extractvalue %Range %rngControl, 2 + %12 = insertvalue %Range zeroinitializer, i64 %9, 0 + %13 = insertvalue %Range %12, i64 %10, 1 + %14 = insertvalue %Range %13, i64 %11, 2 + %15 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %14, i1 true) + %__qsVar0__actualControl__ = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %15) + %16 = getelementptr inbounds { %Array* }, { %Array* }* %__qsVar0__actualControl__, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array* }* %__qsVar0__actualControl__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 1) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %20 = bitcast i8* %19 to %Qubit** + %21 = load %Qubit*, %Qubit** %20, align 8 + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %23 = bitcast %Tuple* %22 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %24 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 0 + %25 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 1 + %26 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 2 + %27 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 3 + %28 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 4 + store double %tolerance, double* %24, align 8 + store %Array* %disentangling, %Array** %25, align 8 + store i2 %axis, i2* %26, align 1 + store { %Array* }* %__qsVar0__actualControl__, { %Array* }** %27, align 8 + store %Qubit* %21, %Qubit** %28, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyToLittleEndian____body(%Callable* %bareOp, { %Array* }* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %register to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array* }* + %5 = getelementptr inbounds { %Array* }, { %Array* }* %4, i32 0, i32 0 + store %Array* %1, %Array** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %bareOp, %Tuple* %3, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyToLittleEndian____adj(%Callable* %bareOp, { %Array* }* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %register to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Callable* @__quantum__rt__callable_copy(%Callable* %bareOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %3) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + store %Array* %1, %Array** %6, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %3, %Tuple* %4, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyToLittleEndian____ctl(%Array* %__controlQubits__, { %Callable*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0 + %bareOp = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1) + %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1 + %register = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %register to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %bareOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %9, align 8 + store %Array* %4, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyToLittleEndian____ctladj(%Array* %__controlQubits__, { %Callable*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0 + %bareOp = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1) + %2 = 
getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1 + %register = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %register to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %bareOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %9, align 8 + store %Array* %4, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Preparation____QsRef3__ApproximatelyUnprepareArbitraryStatePlan____body(double %tolerance, %Array* %coefficients, { %Range, i64 }* %0) { +entry: + %plan = alloca %Array*, align 8 + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %3) + %6 = bitcast i8* %5 to { double, double }** + %7 = load { double, double }*, { double, double }** %6, align 8 + %8 = bitcast { double, double }* %7 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %10 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 0 + %rngControl = load %Range, %Range* %10, 
align 4 + %11 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %11, align 4 + %12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + store %Array* %12, %Array** %plan, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = call { %Array*, %Array*, %Array* }* @Microsoft__Quantum__Preparation____QsRef3__StatePreparationSBMComputeCoefficients____body(%Array* %coefficients) + %14 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %13, i32 0, i32 0 + %disentanglingY = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingY, i32 1) + %15 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %13, i32 0, i32 1 + %disentanglingZ = load %Array*, %Array** %15, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingZ, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %13, i32 0, i32 2 + %newCoefficients = load %Array*, %Array** %16, align 8 + %17 = call i64 @__quantum__rt__array_get_size_1d(%Array* %newCoefficients) + %18 = sub i64 %17, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %19 = phi i64 [ 0, %exit__1 ], [ %25, %exiting__2 ] + %20 = icmp sle i64 %19, %18 + br i1 %20, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 %19) + %22 = bitcast i8* %21 to { double, double }** + %23 = load { double, double }*, { double, double }** %22, align 8 + %24 = bitcast { double, double }* %23 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %25 = add i64 %19, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %newCoefficients, i32 1) + %26 = call i1 @Microsoft__Quantum__Canon____QsRef3__AnyOutsideToleranceD____body(double %tolerance, %Array* %disentanglingZ) + br i1 %26, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__2 + %27 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingZ, i32 1) + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, %Range, i64 }* getelementptr ({ %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { %Callable*, double, %Array*, i2, %Range, i64 }* + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 1 + %32 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 2 + %33 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 3 + %34 = getelementptr inbounds { %Callable*, double, %Array*, i2, 
%Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 4 + %35 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 5 + store %Callable* %27, %Callable** %30, align 8 + store double %tolerance, double* %31, align 8 + store %Array* %disentanglingZ, %Array** %32, align 8 + store i2 -2, i2* %33, align 1 + store %Range %rngControl, %Range* %34, align 4 + store i64 %idxTarget, i64* %35, align 4 + %36 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__67__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__39__FunctionTable, %Tuple* %28) + %37 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 0) + %39 = bitcast i8* %38 to %Callable** + store %Callable* %36, %Callable** %39, align 8 + %40 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %40, i64 0) + %42 = bitcast i8* %41 to %Callable** + store %Callable* %36, %Callable** %42, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 1) + br label %header__3 + +continue__1: ; preds = %exit__4, %exit__2 + %43 = call i1 @Microsoft__Quantum__Canon____QsRef3__AnyOutsideToleranceD____body(double %tolerance, %Array* %disentanglingY) + br i1 %43, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + %44 = load %Array*, %Array** %plan, align 8 + %45 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingY, i32 1) + %46 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, %Range, i64 }* getelementptr ({ %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* null, i32 1) to i64)) + %47 = bitcast %Tuple* %46 to { %Callable*, double, %Array*, i2, %Range, i64 }* + %48 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 0 + %49 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 1 + %50 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 2 + %51 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 3 + %52 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 4 + %53 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 5 + store %Callable* %45, %Callable** %48, align 8 + store double %tolerance, double* %49, align 8 + store %Array* %disentanglingY, %Array** %50, align 8 + store i2 -1, i2* %51, align 1 + store %Range %rngControl, %Range* %52, align 4 + store i64 %idxTarget, i64* %53, align 4 + %54 = call %Callable* @__quantum__rt__callable_create([4 
x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__68__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__39__FunctionTable, %Tuple* %46) + %55 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 0) + %57 = bitcast i8* %56 to %Callable** + store %Callable* %54, %Callable** %57, align 8 + %58 = call %Array* @__quantum__rt__array_concatenate(%Array* %44, %Array* %55) + %59 = call i64 @__quantum__rt__array_get_size_1d(%Array* %58) + %60 = sub i64 %59, 1 + br label %header__5 + +continue__2: ; preds = %exit__9, %continue__1 + %61 = call i1 @Microsoft__Quantum__Canon__IsRangeEmpty__body(%Range %rngControl) + br i1 %61, label %then0__3, label %test1__1 + +then0__3: ; preds = %continue__2 + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 0) + %63 = bitcast i8* %62 to { double, double }** + %64 = load { double, double }*, { double, double }** %63, align 8 + %65 = getelementptr inbounds { double, double }, { double, double }* %64, i32 0, i32 0 + %abs = load double, double* %65, align 8 + %66 = getelementptr inbounds { double, double }, { double, double }* %64, i32 0, i32 1 + %arg = load double, double* %66, align 8 + %67 = call double @Microsoft__Quantum__Math__AbsD__body(double %arg) + %68 = fcmp ogt double %67, %tolerance + br i1 %68, label %then0__4, label %continue__4 + +then0__4: ; preds = %then0__3 + %69 = load %Array*, %Array** %plan, align 8 + %70 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef3__ApplyGlobalRotationStep____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %71 = fmul double -1.000000e+00, %arg + %72 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, i64 }* getelementptr ({ %Callable*, double, i64 }, { %Callable*, double, i64 }* null, i32 1) to i64)) + %73 = bitcast %Tuple* %72 to { %Callable*, double, i64 }* + %74 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %73, i32 0, i32 0 + %75 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %73, i32 0, i32 1 + %76 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %73, i32 0, i32 2 + store %Callable* %70, %Callable** %74, align 8 + store double %71, double* %75, align 8 + store i64 %idxTarget, i64* %76, align 4 + %77 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__69__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__40__FunctionTable, %Tuple* %72) + %78 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 0) + %80 = bitcast i8* %79 to %Callable** + store %Callable* %77, %Callable** %80, align 8 + %81 = call %Array* @__quantum__rt__array_concatenate(%Array* %69, %Array* %78) + %82 = call i64 @__quantum__rt__array_get_size_1d(%Array* %81) + %83 = sub i64 %82, 1 + br label %header__10 + +continue__4: ; preds = %exit__14, %then0__3 + br label %continue__3 + +test1__1: ; preds = %continue__2 + %84 = call i1 @Microsoft__Quantum__Canon____QsRef3__AnyOutsideToleranceCP____body(double %tolerance, %Array* %newCoefficients) + br i1 %84, label %then1__1, label %continue__3 + +then1__1: ; preds = %test1__1 + %85 = extractvalue %Range %rngControl, 0 + %86 = extractvalue %Range %rngControl, 1 + %87 = extractvalue %Range 
%rngControl, 2 + %88 = add i64 %85, 1 + %89 = extractvalue %Range %rngControl, 0 + %90 = extractvalue %Range %rngControl, 1 + %91 = extractvalue %Range %rngControl, 2 + %92 = extractvalue %Range %rngControl, 0 + %93 = extractvalue %Range %rngControl, 1 + %94 = extractvalue %Range %rngControl, 2 + %95 = insertvalue %Range zeroinitializer, i64 %88, 0 + %96 = insertvalue %Range %95, i64 %90, 1 + %newControl = insertvalue %Range %96, i64 %94, 2 + %newTarget = extractvalue %Range %rngControl, 0 + %97 = extractvalue %Range %rngControl, 1 + %98 = extractvalue %Range %rngControl, 2 + %99 = load %Array*, %Array** %plan, align 8 + %100 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %101 = bitcast %Tuple* %100 to { %Range, i64 }* + %102 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %101, i32 0, i32 0 + %103 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %101, i32 0, i32 1 + store %Range %newControl, %Range* %102, align 4 + store i64 %newTarget, i64* %103, align 4 + %104 = call %Array* @Microsoft__Quantum__Preparation____QsRef3__ApproximatelyUnprepareArbitraryStatePlan____body(double %tolerance, %Array* %newCoefficients, { %Range, i64 }* %101) + %105 = call %Array* @__quantum__rt__array_concatenate(%Array* %99, %Array* %104) + %106 = call i64 @__quantum__rt__array_get_size_1d(%Array* %105) + %107 = sub i64 %106, 1 + br label %header__15 + +continue__3: ; preds = %exit__19, %test1__1, %continue__4 + %108 = load %Array*, %Array** %plan, align 8 + %109 = sub i64 %1, 1 + br label %header__20 + +header__3: ; preds = %exiting__3, %then0__1 + %110 = phi i64 [ 0, %then0__1 ], [ %115, %exiting__3 ] + %111 = icmp sle i64 %110, 0 + br i1 %111, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %40, i64 %110) + %113 = bitcast i8* %112 to %Callable** + %114 = load %Callable*, %Callable** %113, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %114, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %114, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %115 = add i64 %110, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %40, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + store %Array* %40, %Array** %plan, align 8 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %116 = phi i64 [ 0, %exit__3 ], [ %121, %exiting__4 ] + %117 = icmp sle i64 %116, 0 + br i1 %117, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 %116) + %119 = bitcast i8* %118 to %Callable** + %120 = load %Callable*, %Callable** %119, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %120, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %120, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %121 = add i64 %116, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 -1) + br label %continue__1 + +header__5: ; preds = %exiting__5, %then0__2 + %122 = phi i64 [ 0, %then0__2 ], [ %127, %exiting__5 ] + %123 = icmp sle i64 %122, %60 + br i1 %123, label 
%body__5, label %exit__5 + +body__5: ; preds = %header__5 + %124 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %58, i64 %122) + %125 = bitcast i8* %124 to %Callable** + %126 = load %Callable*, %Callable** %125, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %126, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %126, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %127 = add i64 %122, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %58, i32 1) + %128 = sub i64 %59, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %129 = phi i64 [ 0, %exit__5 ], [ %134, %exiting__6 ] + %130 = icmp sle i64 %129, %128 + br i1 %130, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %58, i64 %129) + %132 = bitcast i8* %131 to %Callable** + %133 = load %Callable*, %Callable** %132, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %133, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %133, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %134 = add i64 %129, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 1) + %135 = call i64 @__quantum__rt__array_get_size_1d(%Array* %44) + %136 = sub i64 %135, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %137 = phi i64 [ 0, %exit__6 ], [ %142, %exiting__7 ] + %138 = icmp sle i64 %137, %136 + br i1 %138, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %139 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 %137) + %140 = bitcast i8* %139 to %Callable** + %141 = load %Callable*, %Callable** %140, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %141, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %141, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %142 = add i64 %137, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 -1) + %143 = sub i64 %135, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %144 = phi i64 [ 0, %exit__7 ], [ %149, %exiting__8 ] + %145 = icmp sle i64 %144, %143 + br i1 %145, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 %144) + %147 = bitcast i8* %146 to %Callable** + %148 = load %Callable*, %Callable** %147, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %148, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %148, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %149 = add i64 %144, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 -1) + store %Array* %58, %Array** %plan, align 8 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %150 = phi i64 [ 0, %exit__8 ], [ %155, %exiting__9 ] + %151 = icmp sle i64 %150, 0 + br i1 %151, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 %150) + %153 = bitcast i8* %152 to %Callable** + %154 = load %Callable*, %Callable** %153, 
align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %154, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %154, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %155 = add i64 %150, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_reference_count(%Array* %55, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %58, i32 -1) + br label %continue__2 + +header__10: ; preds = %exiting__10, %then0__4 + %156 = phi i64 [ 0, %then0__4 ], [ %161, %exiting__10 ] + %157 = icmp sle i64 %156, %83 + br i1 %157, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %158 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %81, i64 %156) + %159 = bitcast i8* %158 to %Callable** + %160 = load %Callable*, %Callable** %159, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %160, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %160, i32 1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %161 = add i64 %156, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 1) + %162 = sub i64 %82, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %163 = phi i64 [ 0, %exit__10 ], [ %168, %exiting__11 ] + %164 = icmp sle i64 %163, %162 + br i1 %164, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %165 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %81, i64 %163) + %166 = bitcast i8* %165 to %Callable** + %167 = load %Callable*, %Callable** %166, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %167, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %167, i32 1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %168 = add i64 %163, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 1) + %169 = call i64 @__quantum__rt__array_get_size_1d(%Array* %69) + %170 = sub i64 %169, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %171 = phi i64 [ 0, %exit__11 ], [ %176, %exiting__12 ] + %172 = icmp sle i64 %171, %170 + br i1 %172, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %173 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 %171) + %174 = bitcast i8* %173 to %Callable** + %175 = load %Callable*, %Callable** %174, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %175, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %175, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %176 = add i64 %171, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %69, i32 -1) + %177 = sub i64 %169, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %178 = phi i64 [ 0, %exit__12 ], [ %183, %exiting__13 ] + %179 = icmp sle i64 %178, %177 + br i1 %179, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %180 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 %178) + %181 = bitcast i8* %180 to %Callable** + %182 = load %Callable*, %Callable** %181, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %182, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %182, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %183 = add i64 %178, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 -1) + store %Array* %81, %Array** %plan, align 8 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %184 = phi i64 [ 0, %exit__13 ], [ %189, %exiting__14 ] + %185 = icmp sle i64 %184, 0 + br i1 %185, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %186 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 %184) + %187 = bitcast i8* %186 to %Callable** + %188 = load %Callable*, %Callable** %187, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %188, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %188, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %189 = add i64 %184, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_reference_count(%Array* %78, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 -1) + br label %continue__4 + +header__15: ; preds = %exiting__15, %then1__1 + %190 = phi i64 [ 0, %then1__1 ], [ %195, %exiting__15 ] + %191 = icmp sle i64 %190, %107 + br i1 %191, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %192 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 %190) + %193 = bitcast i8* %192 to %Callable** + %194 = load %Callable*, %Callable** %193, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %194, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %194, i32 1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %195 = add i64 %190, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_reference_count(%Array* %105, i32 1) + %196 = sub i64 %106, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %197 = phi i64 [ 0, %exit__15 ], [ %202, %exiting__16 ] + %198 = icmp sle i64 %197, %196 + br i1 %198, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %199 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 %197) + %200 = bitcast i8* %199 to %Callable** + %201 = load %Callable*, %Callable** %200, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %201, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %201, i32 1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %202 = add i64 %197, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_alias_count(%Array* %105, i32 1) + %203 = call i64 @__quantum__rt__array_get_size_1d(%Array* %99) + %204 = sub i64 %203, 1 + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %205 = phi i64 [ 0, %exit__16 ], [ %210, %exiting__17 ] + %206 = icmp sle i64 %205, %204 + br i1 %206, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %207 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %99, i64 %205) + %208 = bitcast i8* %207 to %Callable** + %209 = load %Callable*, %Callable** %208, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %209, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %209, i32 -1) + br label %exiting__17 + 
+exiting__17: ; preds = %body__17 + %210 = add i64 %205, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %99, i32 -1) + %211 = sub i64 %203, 1 + br label %header__18 + +header__18: ; preds = %exiting__18, %exit__17 + %212 = phi i64 [ 0, %exit__17 ], [ %217, %exiting__18 ] + %213 = icmp sle i64 %212, %211 + br i1 %213, label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %99, i64 %212) + %215 = bitcast i8* %214 to %Callable** + %216 = load %Callable*, %Callable** %215, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %216, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %216, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = %body__18 + %217 = add i64 %212, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_reference_count(%Array* %99, i32 -1) + store %Array* %105, %Array** %plan, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %100, i32 -1) + %218 = call i64 @__quantum__rt__array_get_size_1d(%Array* %104) + %219 = sub i64 %218, 1 + br label %header__19 + +header__19: ; preds = %exiting__19, %exit__18 + %220 = phi i64 [ 0, %exit__18 ], [ %225, %exiting__19 ] + %221 = icmp sle i64 %220, %219 + br i1 %221, label %body__19, label %exit__19 + +body__19: ; preds = %header__19 + %222 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %104, i64 %220) + %223 = bitcast i8* %222 to %Callable** + %224 = load %Callable*, %Callable** %223, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %224, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %224, i32 -1) + br label %exiting__19 + +exiting__19: ; preds = %body__19 + %225 = add i64 %220, 1 + br label %header__19 + +exit__19: ; preds = %header__19 + call void @__quantum__rt__array_update_reference_count(%Array* %104, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %105, i32 -1) + br label %continue__3 + +header__20: ; preds = %exiting__20, %continue__3 + %226 = phi i64 [ 0, %continue__3 ], [ %232, %exiting__20 ] + %227 = icmp sle i64 %226, %109 + br i1 %227, label %body__20, label %exit__20 + +body__20: ; preds = %header__20 + %228 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %226) + %229 = bitcast i8* %228 to { double, double }** + %230 = load { double, double }*, { double, double }** %229, align 8 + %231 = bitcast { double, double }* %230 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %231, i32 -1) + br label %exiting__20 + +exiting__20: ; preds = %body__20 + %232 = add i64 %226, 1 + br label %header__20 + +exit__20: ; preds = %header__20 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + %233 = call i64 @__quantum__rt__array_get_size_1d(%Array* %108) + %234 = sub i64 %233, 1 + br label %header__21 + +header__21: ; preds = %exiting__21, %exit__20 + %235 = phi i64 [ 0, %exit__20 ], [ %240, %exiting__21 ] + %236 = icmp sle i64 %235, %234 + br i1 %236, label %body__21, label %exit__21 + +body__21: ; preds = %header__21 + %237 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %108, i64 %235) + %238 = bitcast i8* %237 to %Callable** + %239 = load %Callable*, %Callable** %238, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %239, i32 -1) + call 
void @__quantum__rt__callable_update_alias_count(%Callable* %239, i32 -1) + br label %exiting__21 + +exiting__21: ; preds = %body__21 + %240 = add i64 %235, 1 + br label %header__21 + +exit__21: ; preds = %header__21 + call void @__quantum__rt__array_update_alias_count(%Array* %108, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingY, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingZ, i32 -1) + %241 = sub i64 %17, 1 + br label %header__22 + +header__22: ; preds = %exiting__22, %exit__21 + %242 = phi i64 [ 0, %exit__21 ], [ %248, %exiting__22 ] + %243 = icmp sle i64 %242, %241 + br i1 %243, label %body__22, label %exit__22 + +body__22: ; preds = %header__22 + %244 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 %242) + %245 = bitcast i8* %244 to { double, double }** + %246 = load { double, double }*, { double, double }** %245, align 8 + %247 = bitcast { double, double }* %246 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %247, i32 -1) + br label %exiting__22 + +exiting__22: ; preds = %body__22 + %248 = add i64 %242, 1 + br label %header__22 + +exit__22: ; preds = %header__22 + call void @__quantum__rt__array_update_alias_count(%Array* %newCoefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingY, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingZ, i32 -1) + %249 = sub i64 %17, 1 + br label %header__23 + +header__23: ; preds = %exiting__23, %exit__22 + %250 = phi i64 [ 0, %exit__22 ], [ %256, %exiting__23 ] + %251 = icmp sle i64 %250, %249 + br i1 %251, label %body__23, label %exit__23 + +body__23: ; preds = %header__23 + %252 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 %250) + %253 = bitcast i8* %252 to { double, double }** + %254 = load { double, double }*, { double, double }** %253, align 8 + %255 = bitcast { double, double }* %254 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %255, i32 -1) + br label %exiting__23 + +exiting__23: ; preds = %body__23 + %256 = add i64 %250, 1 + br label %header__23 + +exit__23: ; preds = %header__23 + call void @__quantum__rt__array_update_reference_count(%Array* %newCoefficients, i32 -1) + %257 = bitcast { %Array*, %Array*, %Array* }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %257, i32 -1) + ret %Array* %108 +} + +define internal { %Array*, %Array*, %Array* }* @Microsoft__Quantum__Preparation____QsRef3__StatePreparationSBMComputeCoefficients____body(%Array* %coefficients) { +entry: + %newCoefficients = alloca %Array*, align 8 + %disentanglingY = alloca %Array*, align 8 + %disentanglingZ = alloca %Array*, align 8 + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; 
preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = sdiv i64 %0, 2 + %10 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %9) + %11 = sub i64 %9, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %16, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %12) + %15 = bitcast i8* %14 to double* + store double 0.000000e+00, double* %15, align 8 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %16 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + store %Array* %10, %Array** %disentanglingZ, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %17 = sdiv i64 %0, 2 + %18 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %17) + %19 = sub i64 %17, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %20 = phi i64 [ 0, %exit__2 ], [ %24, %exiting__3 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %20) + %23 = bitcast i8* %22 to double* + store double 0.000000e+00, double* %23, align 8 + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %24 = add i64 %20, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %18, %Array** %disentanglingY, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + %25 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double 0.000000e+00, double 0.000000e+00) + %26 = sdiv i64 %0, 2 + %27 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %26) + %28 = sub i64 %26, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %29 = phi i64 [ 0, %exit__3 ], [ %34, %exiting__4 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %27, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + store { double, double }* %25, { double, double }** %32, align 8 + %33 = bitcast { double, double }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %34 = add i64 %29, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + store %Array* %27, %Array** %newCoefficients, align 8 + %35 = sub i64 %26, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %36 = phi i64 [ 0, %exit__4 ], [ %42, %exiting__5 ] + %37 = icmp sle i64 %36, %35 + br i1 %37, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %27, i64 %36) + %39 = bitcast i8* %38 to { double, double }** + %40 = load { double, double }*, { double, double }** %39, align 8 + %41 = bitcast { double, double }* %40 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %41, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %42 = add i64 %36, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 1) + %43 = sub i64 %0, 1 + br label %preheader__1 + +preheader__1: ; preds = %exit__5 + br label %header__6 + +header__6: ; preds = %exiting__6, 
%preheader__1 + %idxCoeff = phi i64 [ 0, %preheader__1 ], [ %80, %exiting__6 ] + %44 = icmp sle i64 %idxCoeff, %43 + %45 = icmp sge i64 %idxCoeff, %43 + %46 = select i1 true, i1 %44, i1 %45 + br i1 %46, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %idxCoeff) + %48 = bitcast i8* %47 to { double, double }** + %49 = load { double, double }*, { double, double }** %48, align 8 + %50 = add i64 %idxCoeff, 1 + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %50) + %52 = bitcast i8* %51 to { double, double }** + %53 = load { double, double }*, { double, double }** %52, align 8 + %54 = call { { double, double }*, double, double }* @Microsoft__Quantum__Preparation__BlochSphereCoordinates__body({ double, double }* %49, { double, double }* %53) + %55 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %54, i32 0, i32 0 + %rt = load { double, double }*, { double, double }** %55, align 8 + %56 = bitcast { double, double }* %rt to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 1) + %57 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %54, i32 0, i32 1 + %phi = load double, double* %57, align 8 + %58 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %54, i32 0, i32 2 + %theta = load double, double* %58, align 8 + %59 = load %Array*, %Array** %disentanglingZ, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %59, i32 -1) + %60 = call %Array* @__quantum__rt__array_copy(%Array* %59, i1 false) + %61 = fmul double 5.000000e-01, %phi + %62 = sdiv i64 %idxCoeff, 2 + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 %62) + %64 = bitcast i8* %63 to double* + store double %61, double* %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %60, i32 1) + store %Array* %60, %Array** %disentanglingZ, align 8 + %65 = load %Array*, %Array** %disentanglingY, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 -1) + %66 = call %Array* @__quantum__rt__array_copy(%Array* %65, i1 false) + %67 = fmul double 5.000000e-01, %theta + %68 = sdiv i64 %idxCoeff, 2 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %66, i64 %68) + %70 = bitcast i8* %69 to double* + %71 = load double, double* %70, align 8 + store double %67, double* %70, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 1) + store %Array* %66, %Array** %disentanglingY, align 8 + %72 = load %Array*, %Array** %newCoefficients, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 -1) + %73 = call %Array* @__quantum__rt__array_copy(%Array* %72, i1 false) + %74 = sdiv i64 %idxCoeff, 2 + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 %74) + %76 = bitcast i8* %75 to { double, double }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 1) + %77 = load { double, double }*, { double, double }** %76, align 8 + %78 = bitcast { double, double }* %77 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %78, i32 -1) + store { double, double }* %rt, { double, double }** %76, align 8 
+ call void @__quantum__rt__array_update_alias_count(%Array* %73, i32 1) + store %Array* %73, %Array** %newCoefficients, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 -1) + %79 = bitcast { { double, double }*, double, double }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %59, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %72, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %80 = add i64 %idxCoeff, 2 + br label %header__6 + +exit__6: ; preds = %header__6 + %81 = load %Array*, %Array** %disentanglingY, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 1) + %82 = load %Array*, %Array** %disentanglingZ, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %82, i32 1) + %83 = load %Array*, %Array** %newCoefficients, align 8 + %84 = call i64 @__quantum__rt__array_get_size_1d(%Array* %83) + %85 = sub i64 %84, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %86 = phi i64 [ 0, %exit__6 ], [ %92, %exiting__7 ] + %87 = icmp sle i64 %86, %85 + br i1 %87, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 %86) + %89 = bitcast i8* %88 to { double, double }** + %90 = load { double, double }*, { double, double }** %89, align 8 + %91 = bitcast { double, double }* %90 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %91, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %92 = add i64 %86, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %83, i32 1) + %93 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Array* }* getelementptr ({ %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* null, i32 1) to i64)) + %94 = bitcast %Tuple* %93 to { %Array*, %Array*, %Array* }* + %95 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %94, i32 0, i32 0 + %96 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %94, i32 0, i32 1 + %97 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %94, i32 0, i32 2 + store %Array* %81, %Array** %95, align 8 + store %Array* %82, %Array** %96, align 8 + store %Array* %83, %Array** %97, align 8 + %98 = sub i64 %0, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %99 = phi i64 [ 0, %exit__7 ], [ %105, %exiting__8 ] + %100 = icmp sle i64 %99, %98 + br i1 %100, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %99) + %102 = bitcast i8* %101 to { double, double }** + %103 = load { double, double }*, { double, double }** %102, align 8 + %104 = bitcast { double, double }* %103 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %104, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %105 = add i64 %99, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %82, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 -1) + %106 = sub i64 %84, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %107 = phi i64 [ 0, %exit__8 ], [ %113, %exiting__9 ] + %108 = icmp sle i64 %107, %106 + br i1 %108, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 %107) + %110 = bitcast i8* %109 to { double, double }** + %111 = load { double, double }*, { double, double }** %110, align 8 + %112 = bitcast { double, double }* %111 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %112, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %113 = add i64 %107, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %83, i32 -1) + %114 = bitcast { double, double }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %114, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %82, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 -1) + %115 = sub i64 %84, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %116 = phi i64 [ 0, %exit__9 ], [ %122, %exiting__10 ] + %117 = icmp sle i64 %116, %115 + br i1 %117, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 %116) + %119 = bitcast i8* %118 to { double, double }** + %120 = load { double, double }*, { double, double }** %119, align 8 + %121 = bitcast { double, double }* %120 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %121, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %122 = add i64 %116, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_reference_count(%Array* %83, i32 -1) + ret { %Array*, %Array*, %Array* }* %94 +} + +define internal void @Lifted__PartialApplication__67__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %8 = load %Range, %Range* %7, align 4 + %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5 + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Range, i64 }* + %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0 + %14 = 
getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1 + store %Range %8, %Range* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = bitcast %Tuple* %arg-tuple to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4 + store double %2, double* %20, align 8 + store %Array* %4, %Array** %21, align 8 + store i2 %6, i2* %22, align 1 + store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8 + store %Array* %17, %Array** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__67__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %8 = load %Range, %Range* %7, align 4 + %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5 + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Range, i64 }* + %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0 + 
%14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1 + store %Range %8, %Range* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = bitcast %Tuple* %arg-tuple to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4 + store double %2, double* %20, align 8 + store %Array* %4, %Array** %21, align 8 + store i2 %6, i2* %22, align 1 + store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8 + store %Array* %17, %Array** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__67__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, 
%Array*, i2, %Range, i64 }* %5, i32 0, i32 3
+ %11 = load i2, i2* %10, align 1
+ %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4
+ %13 = load %Range, %Range* %12, align 4
+ %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5
+ %15 = load i64, i64* %14, align 4
+ %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64))
+ %17 = bitcast %Tuple* %16 to { %Range, i64 }*
+ %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0
+ %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1
+ store %Range %13, %Range* %18, align 4
+ store i64 %15, i64* %19, align 4
+ %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64))
+ %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }*
+ %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0
+ %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1
+ %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2
+ %25 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3
+ %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4
+ store double %7, double* %22, align 8
+ store %Array* %9, %Array** %23, align 8
+ store i2 %11, i2* %24, align 1
+ store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8
+ store %Array* %4, %Array** %26, align 8
+ %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64))
+ %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }*
+ %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0
+ %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1
+ store %Array* %3, %Array** %29, align 8
+ store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8
+ %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0
+ %32 = load %Callable*, %Callable** %31, align 8
+ %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1)
+ call void @__quantum__rt__callable_make_controlled(%Callable* %33)
+ call void @__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1)
+ ret void
+}
+
+define internal void @Lifted__PartialApplication__67__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }*
+ %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1
+ %3 = load %Array*, %Array** %1, align 8
+ %4 = load %Array*, %Array** %2, align 8
+ %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }*
+ %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1
+ %7 = load double, double* %6, align 8
+ %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2
+ %9 = load %Array*, %Array** %8, align 8
+ %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3
+ %11 = load i2, i2* %10, align 1
+ %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4
+ %13 = load %Range, %Range* %12, align 4
+ %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5
+ %15 = load i64, i64* %14, align 4
+ %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64))
+ %17 = bitcast %Tuple* %16 to { %Range, i64 }*
+ %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0
+ %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1
+ store %Range %13, %Range* %18, align 4
+ store i64 %15, i64* %19, align 4
+ %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64))
+ %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }*
+ %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0
+ %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1
+ %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2
+ %25 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3
+ %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4
+ store double %7, double* %22, align 8
+ store %Array* %9, %Array** %23, align 8
+ store i2 %11, i2* %24, align 1
+ store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8
+ store %Array* %4, %Array** %26, align 8
+ %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64))
+ %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }*
+ %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0
+ %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1
+ store %Array* %3, %Array** %29, align 8
+ store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8
+ %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0
+ %32 = load %Callable*, %Callable** %31, align 8
+ %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1)
+ call void @__quantum__rt__callable_make_adjoint(%Callable* %33)
+ call void @__quantum__rt__callable_make_controlled(%Callable* %33)
+ call void @__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Range, i64 }*, %Array* }*
+ %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1
+ %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2
+ %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3
+ %5 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4
+ %6 = load double, double* %1, align 8
+ %7 = load %Array*, %Array** %2, align 8
+ %8 = load i2, i2* %3, align 1
+ %9 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8
+ %10 = load %Array*, %Array** %5, align 8
+ call void @Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____body(double %6, %Array* %7, i2 %8, { %Range, i64 }* %9, %Array* %10)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Range, i64 }*, %Array* }*
+ %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1
+ %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2
+ %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3
+ %5 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4
+ %6 = load double, double* %1, align 8
+ %7 = load %Array*, %Array** %2, align 8
+ %8 = load i2, i2* %3, align 1
+ %9 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8
+ %10 = load %Array*, %Array** %5, align 8
+ call void @Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____adj(double %6, %Array* %7, i2 %8, { %Range, i64 }* %9, %Array* %10)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }*
+ %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 1
+ %3 = load %Array*, %Array** %1, align 8
+ %4 = load { double, %Array*, i2, { %Range, i64 }*, %Array* }*, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %2, align 8
+ call void @Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____ctl(%Array* %3, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %4)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }*
+ %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 1
+ %3 = load %Array*, %Array** %1, align 8
+ %4 = load { double, %Array*, i2, { %Range, i64 }*, %Array* }*, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %2, align 8
+ call void @Microsoft__Quantum__Preparation____QsRef3__ApplyMultiplexStep____ctladj(%Array* %3, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %4)
+ ret void
+}
+
+define internal void @MemoryManagement__39__RefCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }*
+ %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0
+ %2 = load %Callable*, %Callable** %1, align 8
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change)
+ %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2
+ %4 = load %Array*, %Array** %3, align 8
+ call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change)
+ %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4
+ %6 = load %Range, %Range* %5, align 4
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change)
+ ret void
+}
+
+define internal void @MemoryManagement__39__AliasCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }*
+ %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0
+ %2 = load %Callable*, %Callable** %1, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change)
+ %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2
+ %4 = load %Array*, %Array** %3, align 8
+ call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change)
+ %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4
+ %6 = load %Range, %Range* %5, align 4
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change)
+ ret void
+}
+
+define internal void @Lifted__PartialApplication__68__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }*
+ %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1
+ %2 = load double, double* %1, align 8
+ %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2
+ %4 = load %Array*, %Array** %3, align 8
+ %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3
+ %6 = load i2, i2* %5, align 1
+ %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4
+ %8 = load %Range, %Range* %7, align 4
+ %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5
+ %10 = load i64, i64* %9, align 4
+ %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64))
+ %12 = bitcast %Tuple* %11 to { %Range, i64 }*
+ %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0
+ %14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1
+ store %Range %8, %Range* %13, align 4
+ store i64 %10, i64* %14, align 4
+ %15 = bitcast %Tuple* %arg-tuple to { %Array* }*
+ %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0
+ %17 = load %Array*, %Array** %16, align 8
+ %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64))
+ %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }*
+ %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0
+ %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1
+ %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2
+ %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3
+ %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4
+ store double %2, double* %20, align 8
+ store %Array* %4, %Array** %21, align 8
+ store i2 %6, i2* %22, align 1
+ store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8
+ store %Array* %17, %Array** %24, align 8
+ %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0
+ %26 = load %Callable*, %Callable** %25, align 8
+ call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %18, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1)
+ ret void
+}
+
+define internal void @Lifted__PartialApplication__68__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }*
+ %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1
+ %2 = load double, double* %1, align 8
+ %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2
+ %4 = load %Array*, %Array** %3, align 8
+ %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3
+ %6 = load i2, i2* %5, align 1
+ %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4
+ %8 = load %Range, %Range* %7, align 4
+ %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5
+ %10 = load i64, i64* %9, align 4
+ %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64))
+ %12 = bitcast %Tuple* %11 to { %Range, i64 }*
+ %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0
+ %14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1
+ store %Range %8, %Range* %13, align 4
+ store i64 %10, i64* %14, align 4
+ %15 = bitcast %Tuple* %arg-tuple to { %Array* }*
+ %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0
+ %17 = load %Array*, %Array** %16, align 8
+ %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64))
+ %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }*
+ %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0
+ %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1
+ %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2
+ %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3
+ %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4
+ store double %2, double* %20, align 8
+ store %Array* %4, %Array** %21, align 8
+ store i2 %6, i2* %22, align 1
+ store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8
+ store %Array* %17, %Array** %24, align 8
+ %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0
+ %26 = load %Callable*, %Callable** %25, align 8
+ %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1)
+ call void @__quantum__rt__callable_make_adjoint(%Callable* %27)
+ call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %18, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1)
+ ret void
+}
+
+define internal void @Lifted__PartialApplication__68__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }*
+ %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1
+ %3 = load %Array*, %Array** %1, align 8
+ %4 = load %Array*, %Array** %2, align 8
+ %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }*
+ %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1
+ %7 = load double, double* %6, align 8
+ %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2
+ %9 = load %Array*, %Array** %8, align 8
+ %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3
+ %11 = load i2, i2* %10, align 1
+ %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4
+ %13 = load %Range, %Range* %12, align 4
+ %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5
+ %15 = load i64, i64* %14, align 4
+ %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64))
+ %17 = bitcast %Tuple* %16 to { %Range, i64 }*
+ %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0
+ %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1
+ store %Range %13, %Range* %18, align 4
+ store i64 %15, i64* %19, align 4
+ %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64))
+ %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }*
+ %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0
+ %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1
+ %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2
+ %25 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3
+ %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4
+ store double %7, double* %22, align 8
+ store %Array* %9, %Array** %23, align 8
+ store i2 %11, i2* %24, align 1
+ store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8
+ store %Array* %4, %Array** %26, align 8
+ %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64))
+ %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }*
+ %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0
+ %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1
+ store %Array* %3, %Array** %29, align 8
+ store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8
+ %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0
+ %32 = load %Callable*, %Callable** %31, align 8
+ %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1)
+ call void @__quantum__rt__callable_make_controlled(%Callable* %33)
+ call void @__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1)
+ ret void
+}
+
+define internal void @Lifted__PartialApplication__68__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }*
+ %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1
+ %3 = load %Array*, %Array** %1, align 8
+ %4 = load %Array*, %Array** %2, align 8
+ %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }*
+ %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1
+ %7 = load double, double* %6, align 8
+ %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2
+ %9 = load %Array*, %Array** %8, align 8
+ %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3
+ %11 = load i2, i2* %10, align 1
+ %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4
+ %13 = load %Range, %Range* %12, align 4
+ %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5
+ %15 = load i64, i64* %14, align 4
+ %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64))
+ %17 = bitcast %Tuple* %16 to { %Range, i64 }*
+ %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0
+ %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1
+ store %Range %13, %Range* %18, align 4
+ store i64 %15, i64* %19, align 4
+ %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64))
+ %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }*
+ %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0
+ %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1
+ %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2
+ %25 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3
+ %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4
+ store double %7, double* %22, align 8
+ store %Array* %9, %Array** %23, align 8
+ store i2 %11, i2* %24, align 1
+ store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8
+ store %Array* %4, %Array** %26, align 8
+ %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64))
+ %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }*
+ %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0
+ %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1
+ store %Array* %3, %Array** %29, align 8
+ store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8
+ %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0
+ %32 = load %Callable*, %Callable** %31, align 8
+ %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1)
+ call void @__quantum__rt__callable_make_adjoint(%Callable* %33)
+ call void @__quantum__rt__callable_make_controlled(%Callable* %33)
+ call void @__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1)
+ ret void
+}
+
+define internal void @Lifted__PartialApplication__69__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }*
+ %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1
+ %2 = load double, double* %1, align 8
+ %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2
+ %4 = load i64, i64* %3, align 4
+ %5 = bitcast %Tuple* %arg-tuple to { %Array* }*
+ %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0
+ %7 = load %Array*, %Array** %6, align 8
+ %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64))
+ %9 = bitcast %Tuple* %8 to { double, i64, %Array* }*
+ %10 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 0
+ %11 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 1
+ %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 2
+ store double %2, double* %10, align 8
+ store i64 %4, i64* %11, align 4
+ store %Array* %7, %Array** %12, align 8
+ %13 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0
+ %14 = load %Callable*, %Callable** %13, align 8
+ call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1)
+ ret void
+}
+
+define internal void @Lifted__PartialApplication__69__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }*
+ %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1
+ %2 = load double, double* %1, align 8
+ %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2
+ %4 = load i64, i64* %3, align 4
+ %5 = bitcast %Tuple* %arg-tuple to { %Array* }*
+ %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0
+ %7 = load %Array*, %Array** %6, align 8
+ %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64))
+ %9 = bitcast %Tuple* %8 to { double, i64, %Array* }*
+ %10 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 0
+ %11 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 1
+ %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 2
+ store double %2, double* %10, align 8
+ store i64 %4, i64* %11, align 4
+ store %Array* %7, %Array** %12, align 8
+ %13 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0
+ %14 = load %Callable*, %Callable** %13, align 8
+ %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1)
+ call void @__quantum__rt__callable_make_adjoint(%Callable* %15)
+ call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1)
+ ret void
+}
+
+define internal void @Lifted__PartialApplication__69__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }*
+ %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1
+ %3 = load %Array*, %Array** %1, align 8
+ %4 = load %Array*, %Array** %2, align 8
+ %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }*
+ %6 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 1
+ %7 = load double, double* %6, align 8
+ %8 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 2
+ %9 = load i64, i64* %8, align 4
+ %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64))
+ %11 = bitcast %Tuple* %10 to { double, i64, %Array* }*
+ %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 0
+ %13 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 1
+ %14 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 2
+ store double %7, double* %12, align 8
+ store i64 %9, i64* %13, align 4
+ store %Array* %4, %Array** %14, align 8
+ %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, i64, %Array* }* }* getelementptr ({ %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* null, i32 1) to i64))
+ %16 = bitcast %Tuple* %15 to { %Array*, { double, i64, %Array* }* }*
+ %17 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 0
+ %18 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 1
+ store %Array* %3, %Array** %17, align 8
+ store { double, i64, %Array* }* %11, { double, i64, %Array* }** %18, align 8
+ %19 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 0
+ %20 = load %Callable*, %Callable** %19, align 8
+ %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1)
+ call void @__quantum__rt__callable_make_controlled(%Callable* %21)
+ call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1)
+ ret void
+}
+
+define internal void @Lifted__PartialApplication__69__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }*
+ %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1
+ %3 = load %Array*, %Array** %1, align 8
+ %4 = load %Array*, %Array** %2, align 8
+ %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }*
+ %6 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 1
+ %7 = load double, double* %6, align 8
+ %8 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 2
+ %9 = load i64, i64* %8, align 4
+ %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64))
+ %11 = bitcast %Tuple* %10 to { double, i64, %Array* }*
+ %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 0
+ %13 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 1
+ %14 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 2
+ store double %7, double* %12, align 8
+ store i64 %9, i64* %13, align 4
+ store %Array* %4, %Array** %14, align 8
+ %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, i64, %Array* }* }* getelementptr ({ %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* null, i32 1) to i64))
+ %16 = bitcast %Tuple* %15 to { %Array*, { double, i64, %Array* }* }*
+ %17 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 0
+ %18 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 1
+ store %Array* %3, %Array** %17, align 8
+ store { double, i64, %Array* }* %11, { double, i64, %Array* }** %18, align 8
+ %19 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 0
+ %20 = load %Callable*, %Callable** %19, align 8
+ %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1)
+ call void @__quantum__rt__callable_make_adjoint(%Callable* %21)
+ call void @__quantum__rt__callable_make_controlled(%Callable* %21)
+ call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyGlobalRotationStep____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { double, i64, %Array* }*
+ %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1
+ %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2
+ %4 = load double, double* %1, align 8
+ %5 = load i64, i64* %2, align 4
+ %6 = load %Array*, %Array** %3, align 8
+ call void @Microsoft__Quantum__Preparation____QsRef3__ApplyGlobalRotationStep____body(double %4, i64 %5, %Array* %6)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyGlobalRotationStep____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { double, i64, %Array* }*
+ %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1
+ %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2
+ %4 = load double, double* %1, align 8
+ %5 = load i64, i64* %2, align 4
+ %6 = load %Array*, %Array** %3, align 8
+ call void @Microsoft__Quantum__Preparation____QsRef3__ApplyGlobalRotationStep____adj(double %4, i64 %5, %Array* %6)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyGlobalRotationStep____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, i64, %Array* }* }*
+ %1 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 1
+ %3 = load %Array*, %Array** %1, align 8
+ %4 = load { double, i64, %Array* }*, { double, i64, %Array* }** %2, align 8
+ call void @Microsoft__Quantum__Preparation____QsRef3__ApplyGlobalRotationStep____ctl(%Array* %3, { double, i64, %Array* }* %4)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyGlobalRotationStep____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, i64, %Array* }* }*
+ %1 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 1
+ %3 = load %Array*, %Array** %1, align 8
+ %4 = load { double, i64, %Array* }*, { double, i64, %Array* }** %2, align 8
+ call void @Microsoft__Quantum__Preparation____QsRef3__ApplyGlobalRotationStep____ctladj(%Array* %3, { double, i64, %Array* }* %4)
+ ret void
+}
+
+define internal void @MemoryManagement__40__RefCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }*
+ %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0
+ %2 = load %Callable*, %Callable** %1, align 8
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change)
+ ret void
+}
+
+define internal void @MemoryManagement__40__AliasCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }*
+ %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0
+ %2 = load %Callable*, %Callable** %1, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change)
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change)
+ ret void
+}
+
+define internal { { double, double }*, double, double }* @Microsoft__Quantum__Preparation__BlochSphereCoordinates__body({ double, double }* %a0, { double, double }* %a1) {
+entry:
+ %0 = bitcast { double, double }* %a0 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1)
+ %1 = bitcast { double, double }* %a1 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1)
+ %abs0 = call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %a0)
+ %abs1 = call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %a1)
+ %arg0 = call double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* %a0)
+ %arg1 = call double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* %a1)
+ %2 = fmul double %abs0, %abs0
+ %3 = fmul double %abs1, %abs1
+ %d = fadd double %2, %3
+ %r = call double @__quantum__qis__sqrt__body(double %d)
+ %4 = fadd double %arg0, %arg1
+ %t = fmul double 5.000000e-01, %4
+ %phi = fsub double %arg1, %arg0
+ %5 = call double @__quantum__qis__arctan2__body(double %abs1, double %abs0)
+ %theta = fmul double 2.000000e+00, %5
+ %6 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %r, double %t)
+ %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }*, double, double }* getelementptr ({ { double, double }*, double, double }, { { double, double }*, double, double }* null, i32 1) to i64))
+ %8 = bitcast %Tuple* %7 to { { double, double }*, double, double }*
+ %9 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %8, i32 0, i32 0
+ %10 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %8, i32 0, i32 1
+ %11 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %8, i32 0, i32 2
+ store { double, double }* %6, { double, double }** %9, align 8
+ store double %phi, double* %10, align 8
+ store double %theta, double* %11, align 8
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1)
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1)
+ ret { { double, double }*, double, double }* %8
+}
+
+define internal %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %nQubits) {
+entry:
+ %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients)
+ %1 = sub i64 %0, 1
+ br label %header__1
+
+header__1: ; preds = %exiting__1, %entry
+ %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ]
+ %3 = icmp sle i64 %2, %1
+ br i1 %3, label %body__1, label %exit__1
+
+body__1: ; preds = %header__1
+ %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2)
+ %5 = bitcast i8* %4 to { double, double }**
+ %6 = load { double, double }*, { double, double }** %5, align 8
+ %7 = bitcast { double, double }* %6 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1)
+ br label %exiting__1
+
+exiting__1: ; preds = %body__1
+ %8 = add i64 %2, 1
+ br label %header__1
+
+exit__1: ; preds = %header__1
+ call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+ %9 = trunc i64 %nQubits to i32
+ %10 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %9)
+ %11 = fptosi double %10 to i64
+ %12 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double 0.000000e+00, double 0.000000e+00)
+ %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___4adfaffa1d224736a6c92f5abc9f739b_Padded__body(i64 %11, { double, double }* %12, %Array* %coefficients)
+ %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded)
+ %14 = sub i64 %13, 1
+ br label %header__2
+
+header__2: ; preds = %exiting__2, %exit__1
+ %15 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ]
+ %16 = icmp sle i64 %15, %14
+ br i1 %16, label %body__2, label %exit__2
+
+body__2: ; preds = %header__2
+ %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 %15)
+ %18 = bitcast i8* %17 to { double, double }**
+ %19 = load { double, double }*, { double, double }** %18, align 8
+ %20 = bitcast { double, double }* %19 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 1)
+ br label %exiting__2
+
+exiting__2: ; preds = %body__2
+ %21 = add i64 %15, 1
+ br label %header__2
+
+exit__2: ; preds = %header__2
+ call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1)
+ %22 = icmp sgt i64 %nQubits, 1
+ %23 = sub i64 %nQubits, 1
+ %24 = insertvalue %Range { i64 1, i64 1, i64 0 }, i64 %23, 2
+ %rngControl = select i1 %22, %Range %24, %Range { i64 1, i64 1, i64 0 }
+ %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64))
+ %26 = bitcast %Tuple* %25 to { %Range, i64 }*
+ %27 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %26, i32 0, i32 0
+ %28 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %26, i32 0, i32 1
+ store %Range %rngControl, %Range* %27, align 4
+ store i64 0, i64* %28, align 4
+ %plan = call %Array* @Microsoft__Quantum__Preparation____QsRef3__ApproximatelyUnprepareArbitraryStatePlan____body(double %tolerance, %Array* %coefficientsPadded, { %Range, i64 }* %26)
+ %29 = call i64 @__quantum__rt__array_get_size_1d(%Array* %plan)
+ %30 = sub i64 %29, 1
+ br label %header__3
+
+header__3: ; preds = %exiting__3, %exit__2
+ %31 = phi i64 [ 0, %exit__2 ], [ %36, %exiting__3 ]
+ %32 = icmp sle i64 %31, %30
+ br i1 %32, label %body__3, label %exit__3
+
+body__3: ; preds = %header__3
+ %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %plan, i64 %31)
+ %34 = bitcast i8* %33 to %Callable**
+ %35 = load %Callable*, %Callable** %34, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %35, i32 1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %35, i32 1)
+ br label %exiting__3
+
+exiting__3: ; preds = %body__3
+ %36 = add i64 %31, 1
+ br label %header__3
+
+exit__3: ; preds = %header__3
+ call void @__quantum__rt__array_update_alias_count(%Array* %plan, i32 1)
+ %unprepare = call %Callable* @Microsoft__Quantum__Canon___5516aa15311e4ec2bc23553ec55e6745_BoundCA__body(%Array* %plan)
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %unprepare, i32 1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %unprepare, i32 1)
+ %37 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef3__ApplyToLittleEndian____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+ %38 = call %Callable* @__quantum__rt__callable_copy(%Callable* %unprepare, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %38, i32 1)
+ call void @__quantum__rt__callable_make_adjoint(%Callable* %38)
+ %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable* }, { %Callable*, %Callable* }* null, i32 1) to i64))
+ %40 = bitcast %Tuple* %39 to { %Callable*, %Callable* }*
+ %41 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %40, i32 0, i32 0
+ %42 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %40, i32 0, i32 1
+ store %Callable* %37, %Callable** %41, align 8
+ store %Callable* %38, %Callable** %42, align 8
+ %43 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__70__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__41__FunctionTable, %Tuple* %39)
+ %44 = sub i64 %0, 1
+ br label %header__4
+
+header__4: ; preds = %exiting__4, %exit__3
+ %45 = phi i64 [ 0, %exit__3 ], [ %51, %exiting__4 ]
+ %46 = icmp sle i64 %45, %44
+ br i1 %46, label %body__4, label %exit__4
+
+body__4: ; preds = %header__4
+ %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %45)
+ %48 = bitcast i8* %47 to { double, double }**
+ %49 = load { double, double }*, { double, double }** %48, align 8
+ %50 = bitcast { double, double }* %49 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %50, i32 -1)
+ br label %exiting__4
+
+exiting__4: ; preds = %body__4
+ %51 = add i64 %45, 1
+ br label %header__4
+
+exit__4: ; preds = %header__4
+ call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+ %52 = sub i64 %13, 1
+ br label %header__5
+
+header__5: ; preds = %exiting__5, %exit__4
+ %53 = phi i64 [ 0, %exit__4 ], [ %59, %exiting__5 ]
+ %54 = icmp sle i64 %53, %52
+ br i1 %54, label %body__5, label %exit__5
+
+body__5: ; preds = %header__5
+ %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 %53)
+ %56 = bitcast i8* %55 to { double, double }**
+ %57 = load { double, double }*, { double, double }** %56, align 8
+ %58 = bitcast { double, double }* %57 to %Tuple*
+ call void @__quantum__rt__tuple_update_alias_count(%Tuple* %58, i32 -1)
+ br label %exiting__5
+
+exiting__5: ; preds = %body__5
+ %59 = add i64 %53, 1
+ br label %header__5
+
+exit__5: ; preds = %header__5
+ call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1)
+ %60 = sub i64 %29, 1
+ br label %header__6
+
+header__6: ; preds = %exiting__6, %exit__5
+ %61 = phi i64 [ 0, %exit__5 ], [ %66, %exiting__6 ]
+ %62 = icmp sle i64 %61, %60
+ br i1 %62, label %body__6, label %exit__6
+
+body__6: ; preds = %header__6
+ %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %plan, i64 %61)
+ %64 = bitcast i8* %63 to %Callable**
+ %65 = load %Callable*, %Callable** %64, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %65, i32 -1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %65, i32 -1)
+ br label %exiting__6
+
+exiting__6: ; preds = %body__6
+ %66 = add i64 %61, 1
+ br label %header__6
+
+exit__6: ; preds = %header__6
+ call void @__quantum__rt__array_update_alias_count(%Array* %plan, i32 -1)
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %unprepare, i32 -1)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %unprepare, i32 -1)
+ %67 = bitcast { double, double }* %12 to %Tuple*
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %67, i32 -1)
+ %68 = sub i64 %13, 1
+ br label %header__7
+
+header__7: ; preds = %exiting__7, %exit__6
+ %69 = phi i64 [ 0, %exit__6 ], [ %75, %exiting__7 ]
+ %70 = icmp sle i64 %69, %68
+ br i1 %70, label %body__7, label %exit__7
+
+body__7: ; preds = %header__7
+ %71 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 %69)
+ %72 = bitcast i8* %71 to { double, double }**
+ %73 = load { double, double }*, { double, double }** %72, align 8
+ %74 = bitcast { double, double }* %73 to %Tuple*
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %74, i32 -1)
+ br label %exiting__7
+
+exiting__7: ; preds = %body__7
+ %75 = add i64 %69, 1
+ br label %header__7
+
+exit__7: ; preds = %header__7
+ call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1)
+ %76 = sub i64 %29, 1
+ br label %header__8
+
+header__8: ; preds = %exiting__8, %exit__7
+ %77 = phi i64 [ 0, %exit__7 ], [ %82, %exiting__8 ]
+ %78 = icmp sle i64 %77, %76
+ br i1 %78, label %body__8, label %exit__8
+
+body__8: ; preds = %header__8
+ %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %plan, i64 %77)
+ %80 = bitcast i8* %79 to %Callable**
+ %81 = load %Callable*, %Callable** %80, align 8
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %81, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %81, i32 -1)
+ br label %exiting__8
+
+exiting__8: ; preds = %body__8
+ %82 = add i64 %77, 1
+ br label %header__8
+
+exit__8: ; preds = %header__8
+ call void @__quantum__rt__array_update_reference_count(%Array* %plan, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %unprepare, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %unprepare, i32 -1)
+ ret %Callable* %43
+}
+
+define internal void @Lifted__PartialApplication__70__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }*
+ %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1
+ %2 = load %Callable*, %Callable** %1, align 8
+ %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64))
+ %4 = bitcast %Tuple* %3 to { %Callable*, { %Array* }* }*
+ %5 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 0
+ %6 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 1
+ store %Callable* %2, %Callable** %5, align 8
+ %7 = bitcast %Tuple* %arg-tuple to { %Array* }*
+ store { %Array* }* %7, { %Array* }** %6, align 8
+ %8 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0
+ %9 = load %Callable*, %Callable** %8, align 8
+ call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1)
+ ret void
+}
+
+define internal void @Lifted__PartialApplication__70__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }*
+ %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1
+ %2 = load %Callable*, %Callable** %1, align 8
+ %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64))
+ %4 = bitcast %Tuple* %3 to { %Callable*, { %Array* }* }*
+ %5 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 0
+ %6 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 1
+ store %Callable* %2, %Callable** %5, align 8
+ %7 = bitcast %Tuple* %arg-tuple to { %Array* }*
+ store { %Array* }* %7, { %Array* }** %6, align 8
+ %8 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0
+ %9 = load %Callable*, %Callable** %8, align 8
+ %10 = call %Callable* @__quantum__rt__callable_copy(%Callable* %9, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 1)
+ call void @__quantum__rt__callable_make_adjoint(%Callable* %10)
+ call void @__quantum__rt__callable_invoke(%Callable* %10, %Tuple* %3, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1)
+ ret void
+}
+
+define internal void @Lifted__PartialApplication__70__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }*
+ %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1
+ %3 = load %Array*, %Array** %1, align 8
+ %4 = load { %Array* }*, { %Array* }** %2, align 8
+ %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }*
+ %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1
+ %7 = load %Callable*, %Callable** %6, align 8
+ %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64))
+ %9 = bitcast %Tuple* %8 to { %Callable*, { %Array* }* }*
+ %10 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 0
+ %11 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 1
+ store %Callable* %7, %Callable** %10, align 8
+ store { %Array* }* %4, { %Array* }** %11, align 8
+ %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, { %Array* }* }* }* getelementptr ({ %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* null, i32 1) to i64))
+ %13 = bitcast %Tuple* %12 to { %Array*, { %Callable*, { %Array* }* }* }*
+ %14 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 0
+ %15 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 1
+ store %Array* %3, %Array** %14, align 8
+ store { %Callable*, { %Array* }* }* %9, { %Callable*, { %Array* }* }** %15, align 8
+ %16 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0
+ %17 = load %Callable*, %Callable** %16, align 8
+ %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1)
+ call void @__quantum__rt__callable_make_controlled(%Callable* %18)
+ call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1)
+ ret void
+}
+
+define internal void @Lifted__PartialApplication__70__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }*
+ %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1
+ %3 = load %Array*, %Array** %1, align 8
+ %4 = load { %Array* }*, { %Array* }** %2, align 8
+ %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }*
+ %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1
+ %7 = load %Callable*, %Callable** %6, align 8
+ %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64))
+ %9 = bitcast %Tuple* %8 to { %Callable*, { %Array* }* }*
+ %10 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 0
+ %11 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 1
+ store %Callable* %7, %Callable** %10, align 8
+ store { %Array* }* %4, { %Array* }** %11, align 8
+ %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, { %Array* }* }* }* getelementptr ({ %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* null, i32 1) to i64))
+ %13 = bitcast %Tuple* %12 to { %Array*, { %Callable*, { %Array* }* }* }*
+ %14 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 0
+ %15 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 1
+ store %Array* %3, %Array** %14, align 8
+ store { %Callable*, { %Array* }* }* %9, { %Callable*, { %Array* }* }** %15, align 8
+ %16 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0
+ %17 = load %Callable*, %Callable** %16, align 8
+ %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1)
+ call void @__quantum__rt__callable_make_adjoint(%Callable* %18)
+ call void @__quantum__rt__callable_make_controlled(%Callable* %18)
+ call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyToLittleEndian____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Callable*, { %Array* }* }*
+ %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1
+ %3 = load %Callable*, %Callable** %1, align 8
+ %4 = load { %Array* }*, { %Array* }** %2, align 8
+ call void @Microsoft__Quantum__Preparation____QsRef3__ApplyToLittleEndian____body(%Callable* %3, { %Array* }* %4)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyToLittleEndian____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Callable*, { %Array* }* }*
+ %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1
+ %3 = load %Callable*, %Callable** %1, align 8
+ %4 = load { %Array* }*, { %Array* }** %2, align 8
+ call void @Microsoft__Quantum__Preparation____QsRef3__ApplyToLittleEndian____adj(%Callable* %3, { %Array* }* %4)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyToLittleEndian____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, { %Array* }* }* }*
+ %1 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 1
+ %3 = load %Array*, %Array** %1, align 8
+ %4 = load { %Callable*, { %Array* }* }*, { %Callable*, { %Array* }* }** %2, align 8
+ call void @Microsoft__Quantum__Preparation____QsRef3__ApplyToLittleEndian____ctl(%Array* %3, { %Callable*, { %Array* }* }* %4)
+ ret void
+}
+
+define internal void @Microsoft__Quantum__Preparation____QsRef3__ApplyToLittleEndian____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+ %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, { %Array* }* }* }*
+ %1 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 0
+ %2 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 1
+ %3 = load %Array*, %Array** %1, align 8
+ %4 = load { %Callable*, { %Array* }* }*, { %Callable*, { %Array* }* }** %2, align 8
+ call void @Microsoft__Quantum__Preparation____QsRef3__ApplyToLittleEndian____ctladj(%Array* %3, { %Callable*, { %Array* }* }* %4)
+ ret void
+}
+
+define internal void @MemoryManagement__41__RefCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }*
+ %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0
+ %2 = load %Callable*, %Callable** %1, align 8
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change)
+ %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1
+ %4 = load %Callable*, %Callable** %3, align 8
+ call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change)
+ call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change)
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change)
+ ret void
+}
+
+define internal void @MemoryManagement__41__AliasCount(%Tuple* %capture-tuple, i32 %count-change) {
+entry:
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }*
+ %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0
+ %2 = load %Callable*, %Callable** %1, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change)
+ %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1
+ %4 = load %Callable*, %Callable** %3, align 8
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change)
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change)
+ call void
@__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__body(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = call i64 @__quantum__rt__array_get_size_1d(%Array* %10) + %13 = call %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %12) + call void @__quantum__rt__callable_invoke(%Callable* %13, %Tuple* %11, %Tuple* null) + %14 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %15) + %18 = bitcast i8* %17 to { double, double }** + %19 = load { double, double }*, { double, double }** %18, align 8 + %20 = bitcast { double, double }* %19 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__adj(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** 
%5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = call i64 @__quantum__rt__array_get_size_1d(%Array* %10) + %13 = call %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %12) + %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %13, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %14) + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %11, %Tuple* null) + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %16) + %19 = bitcast i8* %18 to { double, double }** + %20 = load { double, double }*, { double, double }** %19, align 8 + %21 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %5) + %8 = bitcast i8* %7 to { double, double }** + %9 = 
load { double, double }*, { double, double }** %8, align 8 + %10 = bitcast { double, double }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %12 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %12, align 8 + %13 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %14 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + %15 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %14) + %17 = call %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %16) + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 1) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { %Array*, { %Array* }* }* + %21 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %20, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %21, align 8 + store { %Array* }* %qubits, { %Array* }** %22, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %19, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %23 = sub i64 %3, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %24 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %25 = icmp sle i64 %24, %23 + br i1 %25, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %24) + %27 = bitcast i8* %26 to { double, double }** + %28 = load { double, double }*, { double, double }** %27, align 8 + %29 = bitcast { double, double }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %24, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %5) + %8 = bitcast i8* %7 to { double, double }** + %9 = load { double, double }*, { double, double }** %8, align 8 + %10 = bitcast { double, double }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %12 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %12, align 8 + %13 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %14 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + %15 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %14) + %17 = call %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %16) + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 1) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { %Array*, { %Array* }* }* + %21 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* 
}* %20, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %21, align 8 + store { %Array* }* %qubits, { %Array* }** %22, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %19, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %23 = sub i64 %3, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %24 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %25 = icmp sle i64 %24, %23 + br i1 %25, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %24) + %27 = bitcast i8* %26 to { double, double }** + %28 = load { double, double }*, { double, double }** %27, align 8 + %29 = bitcast { double, double }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %24, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__body(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__ComplexPolar__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + store %Callable* %3, %Callable** %6, align 8 + store double 0.000000e+00, double* %7, align 8 + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__71__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__42__FunctionTable, %Tuple* %4) + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* 
@Microsoft__Quantum__Math__AbsD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %10 = call %Callable* @Microsoft__Quantum__Canon___f695a1c4a84a4c22814bb23e1fd09776_Compose__body(%Callable* %8, %Callable* %9) + %coefficientsAsComplexPolar = call %Array* @Microsoft__Quantum__Arrays___214772ffffeb49a1900df09d24a690d9_Mapped__body(%Callable* %10, %Array* %coefficients) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsAsComplexPolar) + %12 = sub i64 %11, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %13 = phi i64 [ 0, %entry ], [ %19, %exiting__1 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %13, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsAsComplexPolar, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__body(double %tolerance, %Array* %coefficientsAsComplexPolar, { %Array* }* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + %20 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %21) + %24 = bitcast i8* %23 to { double, double }** + %25 = load { double, double }*, { double, double }** %24, align 8 + %26 = bitcast { double, double }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsAsComplexPolar, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + %28 = sub i64 %11, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsAsComplexPolar, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__71__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 1 + %5 = load double, double* %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, double }* + %8 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store double %5, double* %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Math__ComplexPolar__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, double }* + %1 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load double, double* %2, align 8 + %5 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %3, double %4) + %6 = bitcast %Tuple* %result-tuple to { { double, double }* }* + %7 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %6, i32 0, i32 0 + store { double, double }* %5, { double, double }** %7, align 8 + ret void +} + +define internal void @MemoryManagement__42__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__42__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call 
void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Math__AbsD__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = call double @Microsoft__Quantum__Math__AbsD__body(double %2) + %4 = bitcast %Tuple* %result-tuple to { double }* + %5 = getelementptr inbounds { double }, { double }* %4, i32 0, i32 0 + store double %3, double* %5, align 8 + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__adj(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__ComplexPolar__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + store %Callable* %3, %Callable** %6, align 8 + store double 0.000000e+00, double* %7, align 8 + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__72__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__42__FunctionTable, %Tuple* %4) + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__AbsD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %10 = call %Callable* @Microsoft__Quantum__Canon___f695a1c4a84a4c22814bb23e1fd09776_Compose__body(%Callable* %8, %Callable* %9) + %__qsVar0__coefficientsAsComplexPolar__ = call %Array* @Microsoft__Quantum__Arrays___214772ffffeb49a1900df09d24a690d9_Mapped__body(%Callable* %10, %Array* %coefficients) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__) + %12 = sub i64 %11, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %13 = phi i64 [ 0, %entry ], [ %19, %exiting__1 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %13, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* 
%__qsVar0__coefficientsAsComplexPolar__, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__adj(double %tolerance, %Array* %__qsVar0__coefficientsAsComplexPolar__, { %Array* }* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + %20 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %21) + %24 = bitcast i8* %23 to { double, double }** + %25 = load { double, double }*, { double, double }** %24, align 8 + %26 = bitcast { double, double }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + %28 = sub i64 %11, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__72__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 1 + %5 = load double, double* %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, double }* + %8 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, double }, { double, double 
}* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store double %5, double* %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__ComplexPolar__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, double }* + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store double 0.000000e+00, double* %11, align 8 + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__73__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__42__FunctionTable, %Tuple* %8) + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__AbsD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %14 = call %Callable* @Microsoft__Quantum__Canon___f695a1c4a84a4c22814bb23e1fd09776_Compose__body(%Callable* %12, %Callable* %13) + %coefficientsAsComplexPolar = call %Array* @Microsoft__Quantum__Arrays___214772ffffeb49a1900df09d24a690d9_Mapped__body(%Callable* %14, %Array* %coefficients) + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsAsComplexPolar) + %16 = sub i64 %15, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %17 = phi i64 [ 0, %entry ], [ %23, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %17) + %20 = bitcast i8* %19 to { double, double }** + %21 = load { double, double }*, { double, double }** 
%20, align 8 + %22 = bitcast { double, double }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %23 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsAsComplexPolar, i32 1) + %24 = sub i64 %15, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %25 = phi i64 [ 0, %exit__1 ], [ %31, %exiting__2 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %25) + %28 = bitcast i8* %27 to { double, double }** + %29 = load { double, double }*, { double, double }** %28, align 8 + %30 = bitcast { double, double }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %31 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsAsComplexPolar, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { double, %Array*, { %Array* }* }* + %34 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 2 + store double %tolerance, double* %34, align 8 + store %Array* %coefficientsAsComplexPolar, %Array** %35, align 8 + store { %Array* }* %qubits, { %Array* }** %36, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %33) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + %37 = sub i64 %15, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %38 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %38) + %41 = bitcast i8* %40 to { double, double }** + %42 = load { double, double }*, { double, double }** %41, align 8 + %43 = bitcast { double, double }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %38, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsAsComplexPolar, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call 
void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + %45 = sub i64 %15, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %46 = phi i64 [ 0, %exit__3 ], [ %52, %exiting__4 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %46) + %49 = bitcast i8* %48 to { double, double }** + %50 = load { double, double }*, { double, double }** %49, align 8 + %51 = bitcast { double, double }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %51, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %52 = add i64 %46, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsAsComplexPolar, i32 -1) + %53 = sub i64 %15, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %54 = phi i64 [ 0, %exit__4 ], [ %60, %exiting__5 ] + %55 = icmp sle i64 %54, %53 + br i1 %55, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %54) + %57 = bitcast i8* %56 to { double, double }** + %58 = load { double, double }*, { double, double }** %57, align 8 + %59 = bitcast { double, double }* %58 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %59, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %60 = add i64 %54, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsAsComplexPolar, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__73__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 1 + %5 = load double, double* %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, double }* + %8 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store double %5, double* %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__ComplexPolar__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, double }* + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store double 0.000000e+00, double* %11, align 8 + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__74__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__42__FunctionTable, %Tuple* %8) + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__AbsD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %14 = call %Callable* @Microsoft__Quantum__Canon___f695a1c4a84a4c22814bb23e1fd09776_Compose__body(%Callable* %12, %Callable* %13) + %__qsVar0__coefficientsAsComplexPolar__ = call %Array* @Microsoft__Quantum__Arrays___214772ffffeb49a1900df09d24a690d9_Mapped__body(%Callable* %14, %Array* %coefficients) + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__) + %16 = sub i64 %15, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %17 = phi i64 [ 0, %entry ], [ %23, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %17) + %20 = bitcast i8* %19 to { double, double }** + %21 = load { double, double }*, { double, double }** %20, align 8 + %22 = bitcast { double, double }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %23 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 1) + %24 = sub i64 %15, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %25 = phi i64 [ 0, %exit__1 ], [ %31, %exiting__2 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %25) + %28 = bitcast i8* %27 to { double, double }** + %29 = load { double, double }*, { double, double }** %28, align 8 + %30 = bitcast { double, double }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %31 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { double, %Array*, { %Array* }* }* + %34 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 2 + store double %tolerance, double* %34, align 8 + store %Array* %__qsVar0__coefficientsAsComplexPolar__, %Array** %35, align 8 + store { %Array* }* %qubits, { %Array* }** %36, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %33) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + %37 = sub i64 %15, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %38 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %38) + %41 = bitcast i8* %40 to { double, double }** + %42 = load { double, double }*, { double, double }** %41, align 8 + %43 = bitcast { double, double }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %38, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + %45 = sub i64 %15, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %46 = phi i64 [ 0, %exit__3 ], [ %52, %exiting__4 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %46) + %49 = bitcast i8* %48 to { double, double }** + %50 = load { double, double }*, { double, double }** %49, align 8 + %51 = bitcast { double, double }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %51, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %52 = add i64 %46, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + %53 = sub i64 %15, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %54 = phi i64 [ 0, %exit__4 ], [ %60, %exiting__5 ] + %55 = icmp sle i64 %54, %53 + br i1 %55, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %54) + %57 = bitcast i8* %56 to { double, double }** + %58 = load { double, double }*, { double, double }** %57, align 8 + %59 = bitcast { double, double }* %58 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %59, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %60 = add i64 %54, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__74__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 1 + %5 = load double, double* %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, double }* + %8 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store double %5, double* %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__adj(%Array* %coefficients, { %Array* }* %qubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__adj(double 0.000000e+00, %Array* %coefficients, { %Array* }* %qubits) + %12 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__ctl(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %coefficients = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %4) + %7 = bitcast i8* %6 to { double, double }** + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds 
= %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %11 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %qubits = load { %Array* }*, { %Array* }** %11, align 8 + %12 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %16) + %19 = bitcast i8* %18 to { double, double }** + %20 = load { double, double }*, { double, double }** %19, align 8 + %21 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { double, %Array*, { %Array* }* }* + %25 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 1 + %27 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 2 + store double 0.000000e+00, double* %25, align 8 + store %Array* %coefficients, %Array** %26, align 8 + store { %Array* }* %qubits, { %Array* }** %27, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %24) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %28 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + %36 = sub i64 %2, 1 + br label 
%header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %37 = phi i64 [ 0, %exit__3 ], [ %43, %exiting__4 ] + %38 = icmp sle i64 %37, %36 + br i1 %38, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %37) + %40 = bitcast i8* %39 to { double, double }** + %41 = load { double, double }*, { double, double }** %40, align 8 + %42 = bitcast { double, double }* %41 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %43 = add i64 %37, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__ctladj(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %coefficients = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %4) + %7 = bitcast i8* %6 to { double, double }** + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %11 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %qubits = load { %Array* }*, { %Array* }** %11, align 8 + %12 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %16) + %19 = bitcast i8* %18 to { double, double }** + %20 = load { double, double }*, { double, double }** %19, align 8 + %21 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, 
i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { double, %Array*, { %Array* }* }* + %25 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 1 + %27 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 2 + store double 0.000000e+00, double* %25, align 8 + store %Array* %coefficients, %Array** %26, align 8 + store { %Array* }* %qubits, { %Array* }** %27, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %24) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %28 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + %36 = sub i64 %2, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %37 = phi i64 [ 0, %exit__3 ], [ %43, %exiting__4 ] + %38 = icmp sle i64 %37, %36 + br i1 %38, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %37) + %40 = bitcast i8* %39 to { double, double }** + %41 = load { double, double }*, { double, double }** %40, align 8 + %42 = bitcast { double, double }* %41 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %43 = add i64 %37, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + ret void +} + +define internal { %Callable* }* @Microsoft__Quantum__Oracles__DiscreteOracle__body(%Callable* %__Item1__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 
ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable* }* + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + store %Callable* %__Item1__, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 -1) + ret { %Callable* }* %1 +} + +define internal void @Lifted__PartialApplication__75__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { i64, %Array* }* + %4 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %3, i32 0, i32 0 + %5 = load i64, i64* %4, align 4 + %6 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Array* }* getelementptr ({ %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, i64, %Array* }* + %10 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %9, i32 0, i32 2 + store %Callable* %2, %Callable** %10, align 8 + store i64 %5, i64* %11, align 4 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__75__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { i64, %Array* }* + %4 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %3, i32 0, i32 0 + %5 = load i64, i64* %4, align 4 + %6 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Array* }* getelementptr ({ %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, i64, %Array* }* + %10 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { %Callable*, i64, %Array* }, { 
%Callable*, i64, %Array* }* %9, i32 0, i32 2 + store %Callable* %2, %Callable** %10, align 8 + store i64 %5, i64* %11, align 4 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__75__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, %Array* }* }, { %Array*, { i64, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, %Array* }* }, { %Array*, { i64, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, %Array* }*, { i64, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %4, i32 0, i32 0 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Array* }* getelementptr ({ %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Callable*, i64, %Array* }* + %14 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %13, i32 0, i32 2 + store %Callable* %7, %Callable** %14, align 8 + store i64 %9, i64* %15, align 4 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, i64, %Array* }* }* getelementptr ({ %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { %Callable*, i64, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { %Callable*, i64, %Array* }* %13, { %Callable*, i64, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 
false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__75__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, %Array* }* }, { %Array*, { i64, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, %Array* }* }, { %Array*, { i64, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, %Array* }*, { i64, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %4, i32 0, i32 0 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Array* }* getelementptr ({ %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Callable*, i64, %Array* }* + %14 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %13, i32 0, i32 2 + store %Callable* %7, %Callable** %14, align 8 + store i64 %9, i64* %15, align 4 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, i64, %Array* }* }* getelementptr ({ %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { %Callable*, i64, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { %Callable*, i64, %Array* }* %13, { %Callable*, i64, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %23) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* 
%result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___ae485b361e91482f9489aa577506f9b4___QsRef3__ApplyOperationRepeatedlyCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, i64, %Array* }* + %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %1, align 8 + %5 = load i64, i64* %2, align 4 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___ae485b361e91482f9489aa577506f9b4___QsRef3__ApplyOperationRepeatedlyCA____body(%Callable* %4, i64 %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___ae485b361e91482f9489aa577506f9b4___QsRef3__ApplyOperationRepeatedlyCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, i64, %Array* }* + %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %1, align 8 + %5 = load i64, i64* %2, align 4 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___ae485b361e91482f9489aa577506f9b4___QsRef3__ApplyOperationRepeatedlyCA____adj(%Callable* %4, i64 %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___ae485b361e91482f9489aa577506f9b4___QsRef3__ApplyOperationRepeatedlyCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, i64, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Callable*, i64, %Array* }*, { %Callable*, i64, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___ae485b361e91482f9489aa577506f9b4___QsRef3__ApplyOperationRepeatedlyCA____ctl(%Array* %3, { %Callable*, i64, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___ae485b361e91482f9489aa577506f9b4___QsRef3__ApplyOperationRepeatedlyCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, i64, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, i64, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Callable*, i64, %Array* }* }, { %Array*, { %Callable*, 
i64, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Callable*, i64, %Array* }*, { %Callable*, i64, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___ae485b361e91482f9489aa577506f9b4___QsRef3__ApplyOperationRepeatedlyCA____ctladj(%Array* %3, { %Callable*, i64, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__43__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__43__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +attributes #0 = { nofree nosync nounwind readnone speculatable willreturn } diff --git a/src/munchkin/tests/qsharp/minified-oracle-generator/Library.qs b/src/munchkin/tests/qsharp/minified-oracle-generator/Library.qs new file mode 100644 index 0000000..b282370 --- /dev/null +++ b/src/munchkin/tests/qsharp/minified-oracle-generator/Library.qs @@ -0,0 +1,18 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. 
+namespace Microsoft.Quantum.OracleGenerator { + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Measurement; + + @EntryPoint() + operation RunProgram(arg: Bool) : Unit { + use f = Qubit(); + within { + if arg { X(f); } + } apply { + let result = IsResultOne(M(f)); + } + } +} \ No newline at end of file diff --git a/src/munchkin/tests/qsharp/minified-oracle-generator/libLLVM.dll b/src/munchkin/tests/qsharp/minified-oracle-generator/libLLVM.dll new file mode 100644 index 0000000..e10836a Binary files /dev/null and b/src/munchkin/tests/qsharp/minified-oracle-generator/libLLVM.dll differ diff --git a/src/munchkin/tests/qsharp/minified-oracle-generator/minified-oracle-generator.csproj b/src/munchkin/tests/qsharp/minified-oracle-generator/minified-oracle-generator.csproj new file mode 100644 index 0000000..32693fd --- /dev/null +++ b/src/munchkin/tests/qsharp/minified-oracle-generator/minified-oracle-generator.csproj @@ -0,0 +1,17 @@ + + + + Exe + net6.0 + true + Detailed + $(NETCoreSdkRuntimeIdentifier) + + + + + Always + + + + diff --git a/src/munchkin/tests/qsharp/minified-oracle-generator/qir/minified-oracle-generator.ll b/src/munchkin/tests/qsharp/minified-oracle-generator/qir/minified-oracle-generator.ll new file mode 100644 index 0000000..ec4bf70 --- /dev/null +++ b/src/munchkin/tests/qsharp/minified-oracle-generator/qir/minified-oracle-generator.ll @@ -0,0 +1,148 @@ + +%Qubit = type opaque +%Result = type opaque +%Array = type opaque +%String = type opaque + +@0 = internal constant [3 x i8] c"()\00" + +define internal void @Microsoft__Quantum__OracleGenerator__RunProgram__body(i1 %arg) { +entry: + %f = call %Qubit* @__quantum__rt__qubit_allocate() + br i1 %arg, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %f) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + %0 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %f) + %__qsVar0__result__ = call i1 @Microsoft__Quantum__Canon__IsResultOne__body(%Result* %0) + br i1 %arg, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + call void @__quantum__qis__x__body(%Qubit* %f) + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %f) + ret void +} + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__rt__qubit_release(%Qubit*) + +declare void @__quantum__qis__x__body(%Qubit*) + +define internal i1 @Microsoft__Quantum__Canon__IsResultOne__body(%Result* %input) { +entry: + %0 = call %Result* @__quantum__rt__result_get_one() + %1 = call i1 @__quantum__rt__result_equal(%Result* %input, %Result* %0) + ret i1 %1 +} + +define internal %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { +entry: + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 -2, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %3 = bitcast i8* %2 to %Qubit** + store %Qubit* %qubit, %Qubit** %3, align 8 + call 
void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %4 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + ret %Result* %4 +} + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) + +declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) + +define internal %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret %Result* %0 +} + +define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare %Result* @__quantum__rt__result_get_one() + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +define void @Microsoft__Quantum__OracleGenerator__RunProgram__Interop(i8 %arg) #0 { +entry: + %0 = trunc i8 %arg to i1 + call void @Microsoft__Quantum__OracleGenerator__RunProgram__body(i1 %0) + ret void +} + +define void @Microsoft__Quantum__OracleGenerator__RunProgram(i8 %arg) #1 { +entry: + %0 = trunc i8 %arg to i1 + call void @Microsoft__Quantum__OracleGenerator__RunProgram__body(i1 %0) + %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @0, i32 0, i32 0)) + call void @__quantum__rt__message(%String* %1) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + ret void +} + +declare void @__quantum__rt__message(%String*) + +declare %String* @__quantum__rt__string_create(i8*) + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) + 
+attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } diff --git a/src/munchkin/tests/qsharp/oracle-generator/Library.qs b/src/munchkin/tests/qsharp/oracle-generator/Library.qs new file mode 100644 index 0000000..78efb39 --- /dev/null +++ b/src/munchkin/tests/qsharp/oracle-generator/Library.qs @@ -0,0 +1,67 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. +namespace Microsoft.Quantum.OracleGenerator { + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Measurement; + + operation Majority3(inputs : (Qubit, Qubit, Qubit), output : Qubit) : Unit { + // The implementation of this operation will be + // automatically derived from the description in + // OracleGenerator.Classical.Majority3 + } + + @EntryPoint() + operation RunProgram() : Unit { + InitOracleGeneratorFor(Microsoft.Quantum.OracleGenerator.Classical.Majority3); + + for ca in [false, true] { + for cb in [false, true] { + for cc in [false, true] { + use (a, b, c) = (Qubit(), Qubit(), Qubit()) { + if ca { X(a); } + if cb { X(b); } + if cc { X(c); } + + let r1 = IsResultOne(MResetZ(a)); + let r2 = IsResultOne(MResetZ(b)); + let r3 = IsResultOne(MResetZ(c)); + let result = (r1 or r2) and (r1 or r3) and (r2 or r3); + + // Majority3((a, b, c), f); + // let result = IsResultOne(MResetZ(b)); + + Message($"{cc} {cb} {ca} -> {result}"); + } + } + } + } + } + + // The QIR compiler optimizes code and removes functions and operations that + // are never used. By calling this function we ensure that (i) the function + // for which the operation should be generated and (ii) intrinsic operations + // used to implement the generated operation (X, CNOT, CCNOT) are present in + // the QIR file emitted by the Q# compiler. + internal function InitOracleGeneratorFor<'In, 'Out>(func : 'In -> 'Out) : Unit { + let _ = Microsoft.Quantum.Intrinsic.X; + let _ = Microsoft.Quantum.Intrinsic.CNOT; + let _ = Microsoft.Quantum.Intrinsic.CCNOT; + let _ = func; + } +} + +namespace Microsoft.Quantum.OracleGenerator.Classical { + // This is the classical implementation that serves as blueprint to generate + // the empty Majority3 operation automatically. Note that the input type + // tuple and the output type correspond to the two inputs to the operation, + // where `Bool` corresponds to `Qubit`. + // + // This function might return a `Bool` tuple type to represent multi-output + // Boolean functions. Then, the second argument in the operation must be a + // `Qubit` tuple of equal size. 
+ internal function Majority3(a : Bool, b : Bool, c : Bool) : Bool { + return (a or b) and (a or c) and (b or c); + } +} \ No newline at end of file diff --git a/src/munchkin/tests/qsharp/oracle-generator/libLLVM.dll b/src/munchkin/tests/qsharp/oracle-generator/libLLVM.dll new file mode 100644 index 0000000..e10836a Binary files /dev/null and b/src/munchkin/tests/qsharp/oracle-generator/libLLVM.dll differ diff --git a/src/munchkin/tests/qsharp/oracle-generator/oracle-generator.csproj b/src/munchkin/tests/qsharp/oracle-generator/oracle-generator.csproj new file mode 100644 index 0000000..32693fd --- /dev/null +++ b/src/munchkin/tests/qsharp/oracle-generator/oracle-generator.csproj @@ -0,0 +1,17 @@ + + + + Exe + net6.0 + true + Detailed + $(NETCoreSdkRuntimeIdentifier) + + + + + Always + + + + diff --git a/src/munchkin/tests/qsharp/oracle-generator/qir/oracle-generator.ll b/src/munchkin/tests/qsharp/oracle-generator/qir/oracle-generator.ll new file mode 100644 index 0000000..513298a --- /dev/null +++ b/src/munchkin/tests/qsharp/oracle-generator/qir/oracle-generator.ll @@ -0,0 +1,689 @@ + +%Tuple = type opaque +%Callable = type opaque +%Array = type opaque +%Qubit = type opaque +%Result = type opaque +%String = type opaque + +@Microsoft__Quantum__OracleGenerator__Classical__Majority3__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__OracleGenerator__Classical__Majority3__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@0 = internal constant [5 x i8] c"true\00" +@1 = internal constant [6 x i8] c"false\00" +@2 = internal constant [2 x i8] c" \00" +@3 = internal constant [5 x i8] c" -> \00" +@Microsoft__Quantum__Intrinsic__X__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__CNOT__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CNOT__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__CCNOT__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CCNOT__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CCNOT__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CCNOT__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__CCNOT__ctladj__wrapper] +@4 = internal constant [3 x i8] c"()\00" + +define internal void @Microsoft__Quantum__OracleGenerator__RunProgram__body() { +entry: + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__OracleGenerator__Classical__Majority3__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void 
@Microsoft__Quantum__OracleGenerator___db80fb546e9242059ab5fcd88d8543b5_InitOracleGeneratorFor__body(%Callable* %0) + %1 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 0) + %3 = bitcast i8* %2 to i1* + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 1) + %5 = bitcast i8* %4 to i1* + store i1 false, i1* %3, align 1 + store i1 true, i1* %5, align 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %6 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %7 = icmp sle i64 %6, 1 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %6) + %9 = bitcast i8* %8 to i1* + %ca = load i1, i1* %9, align 1 + %10 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 0) + %12 = bitcast i8* %11 to i1* + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 1) + %14 = bitcast i8* %13 to i1* + store i1 false, i1* %12, align 1 + store i1 true, i1* %14, align 1 + br label %header__2 + +exiting__1: ; preds = %exit__2 + %15 = add i64 %6, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + ret void + +header__2: ; preds = %exiting__2, %body__1 + %16 = phi i64 [ 0, %body__1 ], [ %25, %exiting__2 ] + %17 = icmp sle i64 %16, 1 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %16) + %19 = bitcast i8* %18 to i1* + %cb = load i1, i1* %19, align 1 + %20 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %20, i64 0) + %22 = bitcast i8* %21 to i1* + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %20, i64 1) + %24 = bitcast i8* %23 to i1* + store i1 false, i1* %22, align 1 + store i1 true, i1* %24, align 1 + br label %header__3 + +exiting__2: ; preds = %exit__3 + %25 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 -1) + br label %exiting__1 + +header__3: ; preds = %exiting__3, %body__2 + %26 = phi i64 [ 0, %body__2 ], [ %58, %exiting__3 ] + %27 = icmp sle i64 %26, 1 + br i1 %27, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %20, i64 %26) + %29 = bitcast i8* %28 to i1* + %cc = load i1, i1* %29, align 1 + %a = call %Qubit* @__quantum__rt__qubit_allocate() + %b = call %Qubit* @__quantum__rt__qubit_allocate() + %c = call %Qubit* @__quantum__rt__qubit_allocate() + br i1 %ca, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__3 + call void @__quantum__qis__x__body(%Qubit* %a) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__3 + br i1 %cb, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + call void @__quantum__qis__x__body(%Qubit* %b) + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 + br i1 %cc, label %then0__3, label %continue__3 + +then0__3: ; preds = %continue__2 + call void @__quantum__qis__x__body(%Qubit* %c) + br 
label %continue__3 + +continue__3: ; preds = %then0__3, %continue__2 + %30 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %a) + %r1 = call i1 @Microsoft__Quantum__Canon__IsResultOne__body(%Result* %30) + %31 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %b) + %r2 = call i1 @Microsoft__Quantum__Canon__IsResultOne__body(%Result* %31) + %32 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %c) + %r3 = call i1 @Microsoft__Quantum__Canon__IsResultOne__body(%Result* %32) + %33 = or i1 %r1, %r2 + br i1 %33, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %continue__3 + %34 = or i1 %r1, %r3 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %continue__3 + %35 = phi i1 [ %34, %condTrue__1 ], [ %33, %continue__3 ] + br i1 %35, label %condTrue__2, label %condContinue__2 + +condTrue__2: ; preds = %condContinue__1 + %36 = or i1 %r2, %r3 + br label %condContinue__2 + +condContinue__2: ; preds = %condTrue__2, %condContinue__1 + %result = phi i1 [ %36, %condTrue__2 ], [ %35, %condContinue__1 ] + br i1 %cc, label %condTrue__3, label %condFalse__1 + +condTrue__3: ; preds = %condContinue__2 + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @0, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__1: ; preds = %condContinue__2 + %38 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @1, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__1, %condTrue__3 + %39 = phi %String* [ %37, %condTrue__3 ], [ %38, %condFalse__1 ] + %40 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @2, i32 0, i32 0)) + %41 = call %String* @__quantum__rt__string_concatenate(%String* %39, %String* %40) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %40, i32 -1) + br i1 %cb, label %condTrue__4, label %condFalse__2 + +condTrue__4: ; preds = %condContinue__3 + %42 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @0, i32 0, i32 0)) + br label %condContinue__4 + +condFalse__2: ; preds = %condContinue__3 + %43 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @1, i32 0, i32 0)) + br label %condContinue__4 + +condContinue__4: ; preds = %condFalse__2, %condTrue__4 + %44 = phi %String* [ %42, %condTrue__4 ], [ %43, %condFalse__2 ] + %45 = call %String* @__quantum__rt__string_concatenate(%String* %41, %String* %44) + call void @__quantum__rt__string_update_reference_count(%String* %41, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + %46 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @2, i32 0, i32 0)) + %47 = call %String* @__quantum__rt__string_concatenate(%String* %45, %String* %46) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + br i1 %ca, label %condTrue__5, label %condFalse__3 + +condTrue__5: ; preds = %condContinue__4 + %48 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @0, i32 0, i32 0)) + br label %condContinue__5 + +condFalse__3: ; preds = %condContinue__4 + %49 = call %String* @__quantum__rt__string_create(i8* 
getelementptr inbounds ([6 x i8], [6 x i8]* @1, i32 0, i32 0)) + br label %condContinue__5 + +condContinue__5: ; preds = %condFalse__3, %condTrue__5 + %50 = phi %String* [ %48, %condTrue__5 ], [ %49, %condFalse__3 ] + %51 = call %String* @__quantum__rt__string_concatenate(%String* %47, %String* %50) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %50, i32 -1) + %52 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @3, i32 0, i32 0)) + %53 = call %String* @__quantum__rt__string_concatenate(%String* %51, %String* %52) + call void @__quantum__rt__string_update_reference_count(%String* %51, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %52, i32 -1) + br i1 %result, label %condTrue__6, label %condFalse__4 + +condTrue__6: ; preds = %condContinue__5 + %54 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @0, i32 0, i32 0)) + br label %condContinue__6 + +condFalse__4: ; preds = %condContinue__5 + %55 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @1, i32 0, i32 0)) + br label %condContinue__6 + +condContinue__6: ; preds = %condFalse__4, %condTrue__6 + %56 = phi %String* [ %54, %condTrue__6 ], [ %55, %condFalse__4 ] + %57 = call %String* @__quantum__rt__string_concatenate(%String* %53, %String* %56) + call void @__quantum__rt__string_update_reference_count(%String* %53, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %56, i32 -1) + call void @__quantum__rt__message(%String* %57) + call void @__quantum__rt__result_update_reference_count(%Result* %30, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %31, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %32, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %57, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %a) + call void @__quantum__rt__qubit_release(%Qubit* %b) + call void @__quantum__rt__qubit_release(%Qubit* %c) + br label %exiting__3 + +exiting__3: ; preds = %condContinue__6 + %58 = add i64 %26, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + br label %exiting__2 +} + +define internal void @Microsoft__Quantum__OracleGenerator___db80fb546e9242059ab5fcd88d8543b5_InitOracleGeneratorFor__body(%Callable* %func) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %func, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %func, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %func, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %func, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__OracleGenerator__Classical__Majority3__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i1, i1, i1 }* + %1 = getelementptr inbounds { i1, i1, i1 }, { i1, i1, i1 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i1, i1, i1 }, { i1, i1, i1 }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i1, i1, i1 }, { i1, i1, i1 }* %0, i32 0, i32 2 + %4 = load i1, i1* %1, align 1 + %5 = load i1, i1* %2, align 1 + %6 = load i1, i1* %3, align 1 + %7 = call i1 @Microsoft__Quantum__OracleGenerator__Classical__Majority3__body(i1 %4, i1 
%5, i1 %6) + %8 = bitcast %Tuple* %result-tuple to { i1 }* + %9 = getelementptr inbounds { i1 }, { i1 }* %8, i32 0, i32 0 + store i1 %7, i1* %9, align 1 + ret void +} + +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__rt__qubit_release(%Qubit*) + +declare void @__quantum__qis__x__body(%Qubit*) + +define internal i1 @Microsoft__Quantum__Canon__IsResultOne__body(%Result* %input) { +entry: + %0 = call %Result* @__quantum__rt__result_get_one() + %1 = call i1 @__quantum__rt__result_equal(%Result* %input, %Result* %0) + ret i1 %1 +} + +define internal %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) { +entry: + %result = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %target) + %0 = call %Result* @__quantum__rt__result_get_one() + %1 = call i1 @__quantum__rt__result_equal(%Result* %result, %Result* %0) + br i1 %1, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret %Result* %result +} + +declare %String* @__quantum__rt__string_create(i8*) + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) + +declare void @__quantum__rt__message(%String*) + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) + +declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) + +define internal i1 @Microsoft__Quantum__OracleGenerator__Classical__Majority3__body(i1 %a, i1 %b, i1 %c) { +entry: + %0 = or i1 %a, %b + br i1 %0, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %entry + %1 = or i1 %a, %c + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %entry + %2 = phi i1 [ %1, %condTrue__1 ], [ %0, %entry ] + br i1 %2, label %condTrue__2, label %condContinue__2 + +condTrue__2: ; preds = %condContinue__1 + %3 = or i1 %b, %c + br label %condContinue__2 + +condContinue__2: ; preds = %condTrue__2, %condContinue__1 + %4 = phi i1 [ %3, %condTrue__2 ], [ %2, %condContinue__1 ] + ret i1 %4 +} + +declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) + +define internal void @Microsoft__Quantum__Intrinsic__X__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void 
@Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, %Qubit* }* + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Qubit*, %Qubit** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, %Qubit* }* + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Qubit*, %Qubit** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Qubit*, %Qubit* }*, { %Qubit*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %3, { %Qubit*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Qubit*, 
%Qubit* }*, { %Qubit*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %3, { %Qubit*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CCNOT__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, %Qubit*, %Qubit* }* + %1 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 2 + %4 = load %Qubit*, %Qubit** %1, align 8 + %5 = load %Qubit*, %Qubit** %2, align 8 + %6 = load %Qubit*, %Qubit** %3, align 8 + call void @Microsoft__Quantum__Intrinsic__CCNOT__body(%Qubit* %4, %Qubit* %5, %Qubit* %6) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CCNOT__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit*, %Qubit*, %Qubit* }* + %1 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 2 + %4 = load %Qubit*, %Qubit** %1, align 8 + %5 = load %Qubit*, %Qubit** %2, align 8 + %6 = load %Qubit*, %Qubit** %3, align 8 + call void @Microsoft__Quantum__Intrinsic__CCNOT__adj(%Qubit* %4, %Qubit* %5, %Qubit* %6) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CCNOT__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Qubit*, %Qubit*, %Qubit* }*, { %Qubit*, %Qubit*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__CCNOT__ctl(%Array* %3, { %Qubit*, %Qubit*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CCNOT__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }, { %Array*, { %Qubit*, %Qubit*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Qubit*, %Qubit*, %Qubit* }*, { %Qubit*, %Qubit*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__CCNOT__ctladj(%Array* %3, { %Qubit*, %Qubit*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define 
internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { +entry: + %__controlQubits__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__controlQubits__, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, %Qubit** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %control, %Qubit** %5, align 8 + %__controlQubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %__controlQubits__, %Array* %3) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__1, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, 
%Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* + %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 + store %Qubit* %control, %Qubit** %5, align 8 + store %Qubit* %target, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CCNOT__body(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) { +entry: + %__controlQubits__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__controlQubits__, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__controlQubits__, i64 1) + %3 = bitcast i8* %2 to %Qubit** + store %Qubit* %control1, %Qubit** %1, align 8 + store %Qubit* %control2, %Qubit** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CCNOT__adj(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__CCNOT__body(%Qubit* %control1, %Qubit* %control2, %Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CCNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control1 = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %control2 = load %Qubit*, %Qubit** %2, align 8 + %3 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 0) + %6 = bitcast i8* %5 to %Qubit** + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 1) + %8 = bitcast i8* %7 to %Qubit** + store %Qubit* %control1, %Qubit** %6, align 8 + store %Qubit* %control2, %Qubit** %8, align 8 + %__controlQubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %__controlQubits__, %Array* %4) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 1) 
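+ ; Note: %__controlQubits__1 above is the caller's control register concatenated with [control1, control2], so the single multiply-controlled X that follows implements the fully controlled CCNOT.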
+ call void @__quantum__qis__x__ctl(%Array* %__controlQubits__1, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CCNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control1 = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %control2 = load %Qubit*, %Qubit** %2, align 8 + %3 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Qubit*, %Qubit*, %Qubit* }* + %6 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { %Qubit*, %Qubit*, %Qubit* }, { %Qubit*, %Qubit*, %Qubit* }* %5, i32 0, i32 2 + store %Qubit* %control1, %Qubit** %6, align 8 + store %Qubit* %control2, %Qubit** %7, align 8 + store %Qubit* %target, %Qubit** %8, align 8 + call void @Microsoft__Quantum__Intrinsic__CCNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit*, %Qubit* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) + +declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) + +declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) + +declare %Tuple* @__quantum__rt__tuple_create(i64) + +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) + +define internal %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { +entry: + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 -2, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %3 = bitcast i8* %2 to %Qubit** + store %Qubit* %qubit, %Qubit** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %4 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + ret %Result* %4 +} + +declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) + +define internal %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret %Result* %0 +} + +declare %Result* @__quantum__rt__result_get_one() + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +define void @Microsoft__Quantum__OracleGenerator__RunProgram__Interop() #0 { +entry: + call void @Microsoft__Quantum__OracleGenerator__RunProgram__body() + ret void +} + +define void @Microsoft__Quantum__OracleGenerator__RunProgram() #1 { +entry: + call void @Microsoft__Quantum__OracleGenerator__RunProgram__body() + %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @4, i32 0, i32 0)) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } diff --git a/src/munchkin/tests/qsharp/parallel-half-moons/Host.cs b/src/munchkin/tests/qsharp/parallel-half-moons/Host.cs new file mode 100644 index 0000000..cde7191 --- /dev/null +++ b/src/munchkin/tests/qsharp/parallel-half-moons/Host.cs @@ -0,0 +1,158 @@ + +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +using Microsoft.Quantum.Simulation.Core; +using Microsoft.Quantum.Simulation.Simulators; +using System; +using System.IO; +using System.Linq; +using System.Text.Json; +using System.Runtime.InteropServices; +using System.Collections.Generic; +using System.Threading.Tasks; +using System.Reflection; +using static System.Math; + +namespace Microsoft.Quantum.Samples +{ + using Microsoft.Quantum.MachineLearning; + + class Program + { + static async Task Main() + { + // We start by loading the training and validation data from our JSON + // data file. + var data = await LoadData(Path.Join(Path.GetDirectoryName(Assembly.GetEntryAssembly().Location), "data.json")); + + // We then define the classifier parameters where we want to start our + // training iterations from. Since gradient descent is good at finding + // local optima, it's helpful to have a variety of different starting + // points. + var parameterStartingPoints = new [] + { + new [] {0.060057, 3.00522, 2.03083, 0.63527, 1.03771, 1.27881, 4.10186, 5.34396}, + new [] {0.586514, 3.371623, 0.860791, 2.92517, 1.14616, 2.99776, 2.26505, 5.62137}, + new [] {1.69704, 1.13912, 2.3595, 4.037552, 1.63698, 1.27549, 0.328671, 0.302282}, + new [] {5.21662, 6.04363, 0.224184, 1.53913, 1.64524, 4.79508, 1.49742, 1.5455} + }; + + // Convert samples to Q# form. + var samples = new QArray<QArray<double>>(data.TrainingData.Features.Select(vector => new QArray<double>(vector))); + + // Once we have the data loaded and have initialized our target machine, + // we can then use that target machine to train a QCC classifier.
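+ // (Note: each parallel branch below blocks on .Result against its own QuantumSimulator instance, so the parallelism is across start points, with one simulator per PLINQ worker.)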
+ var (optimizedParameters, optimizedBias, nMisses) = parameterStartingPoints + // We can use parallel LINQ (PLINQ) to convert the IEnumerable + // over starting points into a parallelized query. + .AsParallel() + // By default, PLINQ may or may not actually run our query in + // parallel, depending on the capabilities of your machine. + // We can force PLINQ to actually parallelize, however, by using + // the WithExecutionMode method. + .WithExecutionMode(ParallelExecutionMode.ForceParallelism) + // Many of the same LINQ methods are defined for PLINQ queries + // as well as IEnumerable objects, so we can go on and run + // the training loop in parallel by selecting on the start point. + .Select( + (startPoint, idxStartPoint) => + { + // Since we want each start point to run on its own + // instance of the full-state simulator, we create a new + // instance here, using C# 8's "using var" syntax to + // ensure that the simulator is deallocated once + // training is complete for this start point. + using var targetMachine = new QuantumSimulator(); + + // We attach a tag to log output so that we can tell + // each training job's messages apart. + // To do so, we disable the default output to the console + // and attach our own event with the index of the + // starting point that generated each message. + targetMachine.DisableLogToConsole(); + targetMachine.OnLog += message => + Console.WriteLine($"[{idxStartPoint}] {message}"); + + // Finally, we can call the Q# entry point with the + // samples, their labels, and our given start point. + return TrainHalfMoonModelAtStartPoint.Run( + targetMachine, + samples, + new QArray<long>(data.TrainingData.Labels), + new QArray<double>(startPoint) + ).Result; + } + ) + // We can then gather the results back into a sequential + // (IEnumerable) collection. + .AsSequential() + // Finally, we want to minimize over the number of misses, + // returning the corresponding sequential classifier model. + // In this case, we use a handy extension method defined below + // to perform the minimization. + .MinBy(result => result.Item3); + + // After training, we can use the validation data to test the accuracy + // of our new classifier. + using var targetMachine = new QuantumSimulator(); + var missRate = await ValidateHalfMoonModel.Run( + targetMachine, + new QArray<QArray<double>>(data.ValidationData.Features.Select(vector => new QArray<double>(vector))), + new QArray<long>(data.ValidationData.Labels), + optimizedParameters, + optimizedBias + ); + System.Console.WriteLine($"Observed {100 * missRate:F2}% misclassifications."); + } + + class LabeledData + { + public List<double[]> Features { get; set; } + public List<long> Labels { get; set; } + } + + class DataSet + { + public LabeledData TrainingData { get; set; } + public LabeledData ValidationData { get; set; } + } + + static async Task<DataSet> LoadData(string dataPath) + { + using var dataReader = File.OpenRead(dataPath); + return await JsonSerializer.DeserializeAsync<DataSet>( + dataReader + ); + } + + } + + public static class LinqExtensions + { + /// <summary> + /// Minimizes over the elements of an enumerable, using a given + /// projection function to define the relative ordering between + /// elements. + /// </summary> + /// <param name="source">A source of elements to be minimized over.</param> + /// <param name="by"> + /// A projection function used to define comparisons between + /// elements + /// </param> + /// <returns> + /// The element <c>min</c> of <c>source</c> such that <c>by(min)</c> + /// is minimized. In the case that two or more elements share the + /// same value of <c>by</c>, the first element will be returned.
+ /// </returns> + public static TSource MinBy<TSource, TResult>(this IEnumerable<TSource> source, Func<TSource, TResult> by) + where TResult : IComparable<TResult> => + source + .Aggregate( + (minimum, next) => + by(minimum).CompareTo(by(next)) < 0 + ? minimum + : next + ); + } +} diff --git a/src/munchkin/tests/qsharp/parallel-half-moons/Training.qs b/src/munchkin/tests/qsharp/parallel-half-moons/Training.qs new file mode 100644 index 0000000..7742d5a --- /dev/null +++ b/src/munchkin/tests/qsharp/parallel-half-moons/Training.qs @@ -0,0 +1,95 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Samples { + open Microsoft.Quantum.Convert; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.MachineLearning; + open Microsoft.Quantum.Math; + + function WithProductKernel(scale : Double, sample : Double[]) : Double[] { + return sample + [scale * Fold(TimesD, 1.0, sample)]; + } + + function Preprocessed(samples : Double[][]) : Double[][] { + let scale = 1.0; + + return Mapped( + WithProductKernel(scale, _), + samples + ); + } + + function DefaultSchedule(samples : Double[][]) : SamplingSchedule { + return SamplingSchedule([ + 0..Length(samples) - 1 + ]); + } + + function ClassifierStructure() : ControlledRotation[] { + return [ + ControlledRotation((0, new Int[0]), PauliX, 4), + ControlledRotation((0, new Int[0]), PauliZ, 5), + ControlledRotation((1, new Int[0]), PauliX, 6), + ControlledRotation((1, new Int[0]), PauliZ, 7), + ControlledRotation((0, [1]), PauliX, 0), + ControlledRotation((1, [0]), PauliX, 1), + ControlledRotation((1, new Int[0]), PauliZ, 2), + ControlledRotation((1, new Int[0]), PauliX, 3) + ]; + } + + operation TrainHalfMoonModelAtStartPoint( + trainingVectors : Double[][], + trainingLabels : Int[], + startPoint : Double[] + ) : (Double[], Double, Int) { + let samples = Mapped( + LabeledSample, + Zipped(Preprocessed(trainingVectors), trainingLabels) + ); + let options = DefaultTrainingOptions() + w/ LearningRate <- 0.1 + w/ MinibatchSize <- 15 + w/ Tolerance <- 0.005 + w/ NMeasurements <- 10000 + w/ MaxEpochs <- 2 + w/ VerboseMessage <- Message; + Message("Ready to train."); + // Train at the given start point, and get back an + // optimized model.
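+ // (Note: both sampling schedules passed to the trainer below are DefaultSchedule(trainingVectors), so this job's training and validation tracking both range over the full training set.)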
+ let (optimizedModel, nMisses) = TrainSequentialClassifierAtModel( + SequentialModel(ClassifierStructure(), startPoint, 0.0), + samples, + options, + DefaultSchedule(trainingVectors), + DefaultSchedule(trainingVectors) + ); + return (optimizedModel::Parameters, optimizedModel::Bias, nMisses); + } + + operation ValidateHalfMoonModel( + validationVectors : Double[][], + validationLabels : Int[], + parameters : Double[], + bias : Double + ) : Double { + let samples = Mapped( + LabeledSample, + Zipped(Preprocessed(validationVectors), validationLabels) + ); + let tolerance = 0.005; + let nMeasurements = 10000; + let results = ValidateSequentialClassifier( + SequentialModel(ClassifierStructure(), parameters, bias), + samples, + tolerance, + nMeasurements, + DefaultSchedule(validationVectors) + ); + return IntAsDouble(results::NMisclassifications) / IntAsDouble(Length(samples)); + } + +} diff --git a/src/munchkin/tests/qsharp/parallel-half-moons/data.json b/src/munchkin/tests/qsharp/parallel-half-moons/data.json new file mode 100644 index 0000000..aff01c7 --- /dev/null +++ b/src/munchkin/tests/qsharp/parallel-half-moons/data.json @@ -0,0 +1,774 @@ +{ + "TrainingData": { + "Features": [ + [ + -0.00035731158553797826, + -0.06346656877546791 + ], + [ + -0.0012062337869831463, + -0.15157022343854742 + ], + [ + -0.014980621149421162, + 0.7452612548840972 + ], + [ + -0.03536657731228199, + 0.36450905608465584 + ], + [ + -0.08658898925520085, + 0.8549594407803911 + ], + [ + -0.08780040610147566, + 0.3158448974605273 + ], + [ + -0.11041961535964329, + 0.029508550947450807 + ], + [ + -0.1236937998311684, + 0.8915912980454145 + ], + [ + -0.1381620621317352, + 0.100136328149671 + ], + [ + -0.14962800792227093, + 0.03490175212127222 + ], + [ + -0.16703979749984005, + 1.1061592722104854 + ], + [ + -0.18702040402999198, + 0.6909521716931064 + ], + [ + -0.274906632066767, + 0.94407037626692 + ], + [ + -0.40035386722930877, + 0.9640718547110079 + ], + [ + -0.42645222935272076, + 0.8084904468062676 + ], + [ + -0.44580392648704686, + 0.8279493796060237 + ], + [ + -0.4565691026559081, + 1.026935374542866 + ], + [ + -0.47115221818532443, + 1.1627266622606667 + ], + [ + -0.48113754393867386, + 1.0177743873407035 + ], + [ + -0.5573460215796026, + 0.9347902465129836 + ], + [ + -0.5629032983072818, + 0.7552388504286991 + ], + [ + -0.6349597854601028, + 0.43957861193628395 + ], + [ + -0.6559537333860002, + 0.5512056376068405 + ], + [ + -0.675859772306459, + 0.8831917431313464 + ], + [ + -0.6789437666097666, + 0.33724531848285233 + ], + [ + -0.7184386804472017, + 0.1970015971182835 + ], + [ + -0.7265600758299311, + 0.312525805601502 + ], + [ + -0.7471807053856604, + 0.6879169346941071 + ], + [ + -0.7501513130996611, + 0.5868387512254272 + ], + [ + -0.7675714408206668, + 0.3111027708930208 + ], + [ + -0.8211557055524259, + 0.7060386061700794 + ], + [ + -0.8523667503036682, + 0.8760469456594606 + ], + [ + -0.9318185748767674, + 0.5922375073085167 + ], + [ + -0.9360478798593356, + 0.7217802547976218 + ], + [ + -0.9458822433124261, + 0.5463806510445365 + ], + [ + -0.9927882574787499, + 0.37272854954816614 + ], + [ + -1.0794720450455788, + 0.0 + ], + [ + -1.0903971011972062, + 0.15043830790243995 + ], + [ + -1.1240595085750649, + 0.06868659592655377 + ], + [ + -1.214345522322524, + 0.0522205845910377 + ], + [ + -1.2478207683344034, + 0.09971541915389676 + ], + [ + 0.015790851198098987, + 0.8176667797938731 + ], + [ + 0.024025073005429665, + 0.19498421190371337 + ], + [ + 0.04070325327489788, + 0.249948540045528 + ], + [ + 
0.05330457430838108, + 1.0564351286097744 + ], + [ + 0.08150549868348289, + 1.0466409152302647 + ], + [ + 0.13545436353822082, + -0.28735571909099844 + ], + [ + 0.13790002655139869, + 0.8079245980111084 + ], + [ + 0.14330248971296777, + 0.16878957858332952 + ], + [ + 0.15111162653375743, + -0.48726422061762564 + ], + [ + 0.15241120217118012, + 0.7882780938447811 + ], + [ + 0.1684510563335908, + -0.16516768291416295 + ], + [ + 0.1889645905680306, + 0.04426699518384147 + ], + [ + 0.19707578589348906, + -0.2380232166101135 + ], + [ + 0.21923185570294207, + 1.0258464978767086 + ], + [ + 0.2272375409453048, + -0.2660358584515484 + ], + [ + 0.23414554456624714, + -0.1821040168632353 + ], + [ + 0.23872423712311908, + -0.5521479642401027 + ], + [ + 0.28398615786926007, + -0.5053038255570522 + ], + [ + 0.2913326625188073, + 0.9961989575986329 + ], + [ + 0.3492829018038269, + 0.814349058873698 + ], + [ + 0.35875569081458425, + -0.07308833776422552 + ], + [ + 0.362865905625692, + 0.8093526658754205 + ], + [ + 0.3745589417707047, + -0.06441622857939078 + ], + [ + 0.3862048923032335, + 0.897812088282051 + ], + [ + 0.44463324389734504, + 0.8059192934499005 + ], + [ + 0.45996051267747096, + 1.0931047101235087 + ], + [ + 0.47441840271450164, + -0.5671490088420339 + ], + [ + 0.4792391502435683, + 0.8766676237020283 + ], + [ + 0.49674365935819675, + 0.7262766499720068 + ], + [ + 0.5254035987296022, + -0.5304074709596346 + ], + [ + 0.5515696762477782, + 0.7047403349020965 + ], + [ + 0.5583796023696284, + -0.3479717584215746 + ], + [ + 0.5637841185957758, + 0.7379114816809925 + ], + [ + 0.6077488386907419, + -0.6479590594758237 + ], + [ + 0.6336773725676798, + 0.4013212953811713 + ], + [ + 0.6417404109261109, + -0.2616928728716996 + ], + [ + 0.6468024469374531, + 0.5723741631150826 + ], + [ + 0.6519740444333493, + -0.39192657875894454 + ], + [ + 0.6671288712714325, + 0.7610985060640911 + ], + [ + 0.6932965033438983, + -0.5691093818942972 + ], + [ + 0.7169235943848092, + 0.8787011825169279 + ], + [ + 0.735662886386677, + -0.5139463255333367 + ], + [ + 0.7372698027976039, + 0.5213352096575902 + ], + [ + 0.7418878900815039, + 0.7002467277813453 + ], + [ + 0.7664851841222006, + -0.6514816641294779 + ], + [ + 0.7725057509267337, + 0.8420004414490583 + ], + [ + 0.7826795986676983, + 0.18277102726722777 + ], + [ + 0.7940103483510714, + 0.4372365151467599 + ], + [ + 0.7979464168249012, + 0.34798699949141526 + ], + [ + 0.8260330228824426, + -0.49820898981569917 + ], + [ + 0.842288240480912, + 0.36875379048411394 + ], + [ + 0.851971206551066, + 0.39348082552085084 + ], + [ + 0.86205044244621, + 0.19841586681120152 + ], + [ + 0.8649902905009432, + -0.8610121773585954 + ], + [ + 0.8918712515887249, + 0.04342395954912626 + ], + [ + 0.9330218289777374, + -0.3206460583629134 + ], + [ + 0.9415223350819799, + 0.09922080120808434 + ], + [ + 0.9803048280161961, + -0.4510040235742361 + ], + [ + 1.0208043681386143, + -0.8751310593605329 + ], + [ + 1.052488336309264, + 0.4328256302831184 + ], + [ + 1.0985310687439107, + 0.14973497839506208 + ], + [ + 1.1339205799760226, + -0.43403293167637014 + ], + [ + 1.1565732261285677, + -0.6595940690524519 + ], + [ + 1.1602020363874843, + -0.2934418893827161 + ], + [ + 1.2682099135173655, + -0.6278842301450488 + ], + [ + 1.2728317781228748, + 0.09730915590736164 + ], + [ + 1.2811702965638943, + -0.3447002832792715 + ], + [ + 1.314987405783858, + -0.3575003637607095 + ], + [ + 1.385688958936755, + -0.40535754675580093 + ], + [ + 1.3971199048085738, + -0.5457951846994613 + ], + [ + 
1.5012513595709254, + -0.5463014889465386 + ], + [ + 1.5172077595677511, + -0.6206057874342353 + ], + [ + 1.5231911523171273, + -0.34631140825430407 + ], + [ + 1.580210458744003, + -0.15260672753941906 + ], + [ + 1.6079210210735444, + -0.19380515481338 + ], + [ + 1.6104405670902222, + -0.11669112572858897 + ], + [ + 1.6183753843243038, + -0.503965099128002 + ], + [ + 1.6201999823810802, + -0.33683064921830785 + ], + [ + 1.6576017564472596, + -0.4885701807109335 + ], + [ + 1.6845310670806912, + -0.2311093840793329 + ], + [ + 1.7410140105531124, + -0.3425370707868646 + ], + [ + 1.8363547966325138, + 0.14677633121227235 + ], + [ + 1.8486093722400083, + -0.2787461884436434 + ], + [ + 1.951337189006637, + 0.30510300479138863 + ], + [ + 1.9714210183447758, + 0.012927048499541016 + ], + [ + 1.9870163765527555, + 0.1911013992299178 + ], + [ + 1.9932458483843853, + -0.1645193522473022 + ], + [ + 2.0030979227790873, + 0.2522312038058796 + ], + [ + 2.013819388433135, + -0.12322354775036859 + ], + [ + 2.083597025712324, + 0.16834772265128853 + ], + [ + 2.1046622748873682, + 0.321969070147389 + ], + [ + 2.108720022072707, + -0.21072346710168843 + ], + [ + 2.124147293246285, + -0.07516641977035582 + ], + [ + 2.1433309328426424, + 0.4 + ], + [ + 2.207412007224068, + 0.1144911302646231 + ], + [ + 2.237584830750855, + 0.3595752334213297 + ] + ], + "Labels": [ + 0, + 0, + 1, + 0, + 1, + 0, + 0, + 1, + 0, + 0, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 0, + 1, + 1, + 0, + 1, + 0, + 0, + 1, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 1, + 1, + 0, + 1, + 0, + 1, + 1, + 1, + 0, + 1, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 0, + 1, + 1, + 0, + 1, + 1, + 1, + 1, + 0, + 1, + 1, + 1, + 0, + 1, + 0, + 1, + 0, + 0, + 1, + 1, + 0, + 0, + 0, + 0, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + }, + "ValidationData": { + "Features": [ + [ + 0.883898709935946, + 0.0 + ], + [ + 1.1793893456320759, + 0.3748347849076317 + ], + [ + 0.5231831310352743, + 0.900818803609586 + ], + [ + 0.30081607582696224, + 0.9615554053041235 + ], + [ + -0.06340157706002579, + 1.224735863277014 + ], + [ + -0.5382643095868677, + 1.0133143141607128 + ], + [ + -0.5975208094029268, + 0.561438878658366 + ], + [ + -0.766231190604878, + 0.19526118857958627 + ], + [ + 0.14060021668138722, + 0.4 + ], + [ + 0.05239183271831849, + -0.008592101868396151 + ], + [ + 0.36087228857768316, + -0.3470962748964499 + ], + [ + 0.7371834528946137, + -0.3046817236779634 + ], + [ + 1.0706889634601606, + -0.7207038198913537 + ], + [ + 1.487876633223481, + -0.37571084836184543 + ], + [ + 1.8726657414770358, + -0.16558447370672003 + ] + ], + "Labels": [ + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + ] + } +} \ No newline at end of file diff --git a/src/munchkin/tests/qsharp/parallel-half-moons/libLLVM.dll b/src/munchkin/tests/qsharp/parallel-half-moons/libLLVM.dll new file mode 100644 index 0000000..e10836a Binary files /dev/null and b/src/munchkin/tests/qsharp/parallel-half-moons/libLLVM.dll differ diff --git a/src/munchkin/tests/qsharp/parallel-half-moons/parallel-half-moons.csproj b/src/munchkin/tests/qsharp/parallel-half-moons/parallel-half-moons.csproj new file mode 100644 index 0000000..c557663 --- /dev/null +++ 
b/src/munchkin/tests/qsharp/parallel-half-moons/parallel-half-moons.csproj @@ -0,0 +1,23 @@ + + + + Library + net6.0 + true + Detailed + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + + Always + + + + diff --git a/src/munchkin/tests/qsharp/parallel-half-moons/qir/VQE.ll b/src/munchkin/tests/qsharp/parallel-half-moons/qir/VQE.ll new file mode 100644 index 0000000..c2881c6 --- /dev/null +++ b/src/munchkin/tests/qsharp/parallel-half-moons/qir/VQE.ll @@ -0,0 +1,31688 @@ + +%Tuple = type opaque +%Array = type opaque +%Callable = type opaque +%Result = type opaque +%Qubit = type opaque +%Range = type { i64, i64, i64 } +%String = type opaque + +@Microsoft__Quantum__Intrinsic__Reset__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Reset__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@0 = internal constant [75 x i8] c"operation ApplyDiagonalUnitary -- Number of qubits must be greater than 0.\00" +@PartialApplication__1__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctladj__wrapper] +@Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj__wrapper] +@MemoryManagement__1__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__1__RefCount, void (%Tuple*, i32)* @MemoryManagement__1__AliasCount] +@PartialApplication__2__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctladj__wrapper] +@Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj__wrapper] +@MemoryManagement__2__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__2__RefCount, void (%Tuple*, i32)* @MemoryManagement__2__AliasCount] +@Microsoft__Quantum__Intrinsic__H__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, 
%Tuple*)* @Microsoft__Quantum__Intrinsic__H__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__ctladj__wrapper] +@PartialApplication__3__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__S__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__ctladj__wrapper] +@1 = internal constant [38 x i8] c"MultiplexPauli failed. Invalid pauli \00" +@2 = internal constant [7 x i8] c"PauliX\00" +@3 = internal constant [7 x i8] c"PauliY\00" +@4 = internal constant [7 x i8] c"PauliZ\00" +@5 = internal constant [7 x i8] c"PauliI\00" +@6 = internal constant [2 x i8] c".\00" +@PartialApplication__4__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctladj__wrapper] +@PartialApplication__5__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctladj__wrapper] +@PartialApplication__6__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctladj__wrapper] +@PartialApplication__7__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__ctladj__wrapper] +@PartialApplication__8__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__ctl__wrapper, void (%Tuple*, %Tuple*, 
%Tuple*)* @Lifted__PartialApplication__8__ctladj__wrapper] +@PartialApplication__9__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__ctladj__wrapper] +@PartialApplication__10__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctladj__wrapper] +@PartialApplication__11__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctladj__wrapper] +@PartialApplication__12__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__ctladj__wrapper] +@PartialApplication__13__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__13__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__13__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__13__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__13__ctladj__wrapper] +@Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__ctladj__wrapper] +@MemoryManagement__3__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__3__RefCount, void (%Tuple*, i32)* @MemoryManagement__3__AliasCount] +@PartialApplication__14__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__14__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__14__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__14__ctl__wrapper, void (%Tuple*, 
%Tuple*, %Tuple*)* @Lifted__PartialApplication__14__ctladj__wrapper] +@Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__ctladj__wrapper] +@MemoryManagement__4__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__4__RefCount, void (%Tuple*, i32)* @MemoryManagement__4__AliasCount] +@PartialApplication__15__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__15__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__15__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__15__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__15__ctladj__wrapper] +@Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__ctladj__wrapper] +@MemoryManagement__5__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__5__RefCount, void (%Tuple*, i32)* @MemoryManagement__5__AliasCount] +@7 = internal constant [47 x i8] c"Control register shorter than control pattern.\00" +@PartialApplication__16__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__16__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__16__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__16__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__16__ctladj__wrapper] +@Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__ctladj__wrapper] +@MemoryManagement__6__FunctionTable = internal 
constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__6__RefCount, void (%Tuple*, i32)* @MemoryManagement__6__AliasCount] +@PartialApplication__17__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__ctladj__wrapper] +@Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__ctladj__wrapper] +@MemoryManagement__7__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__7__RefCount, void (%Tuple*, i32)* @MemoryManagement__7__AliasCount] +@PartialApplication__18__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__18__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Canon___516334c53dfb4d4b89cd46336a852347___QsRef0__ComposedOutput____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___516334c53dfb4d4b89cd46336a852347___QsRef0__ComposedOutput____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__8__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__8__RefCount, void (%Tuple*, i32)* @MemoryManagement__8__AliasCount] +@PartialApplication__19__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__ctladj__wrapper] +@Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* 
@Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctladj__wrapper] +@MemoryManagement__9__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__9__RefCount, void (%Tuple*, i32)* @MemoryManagement__9__AliasCount] +@PartialApplication__20__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__ctladj__wrapper] +@Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctladj__wrapper] +@PartialApplication__21__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__ctladj__wrapper] +@Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctladj__wrapper] +@MemoryManagement__10__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__10__RefCount, void (%Tuple*, i32)* @MemoryManagement__10__AliasCount] +@8 = internal constant [11 x i8] c"Odd order \00" +@9 = internal constant [20 x i8] c" not yet supported.\00" +@PartialApplication__22__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__ctladj__wrapper] +@Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void 
(%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____ctladj__wrapper] +@MemoryManagement__11__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__11__RefCount, void (%Tuple*, i32)* @MemoryManagement__11__AliasCount] +@PartialApplication__23__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__23__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Simulation____QsRef0___AddGeneratorSystems____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef0___AddGeneratorSystems____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__12__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__12__RefCount, void (%Tuple*, i32)* @MemoryManagement__12__AliasCount] +@Microsoft__Quantum__Simulation__IdentityGeneratorIndex__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Simulation__AddGeneratorSystems__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation__AddGeneratorSystems__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__24__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__ctladj__wrapper] +@Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____ctladj__wrapper] +@MemoryManagement__13__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__13__RefCount, 
void (%Tuple*, i32)* @MemoryManagement__13__AliasCount] +@PartialApplication__25__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__ctladj__wrapper] +@Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____ctladj__wrapper] +@MemoryManagement__14__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__14__RefCount, void (%Tuple*, i32)* @MemoryManagement__14__AliasCount] +@PartialApplication__26__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__ctladj__wrapper] +@MemoryManagement__15__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__15__RefCount, void (%Tuple*, i32)* @MemoryManagement__15__AliasCount] +@10 = internal constant [71 x i8] c"Specified output array length must be longer than `inputArray` length.\00" +@11 = internal constant [39 x i8] c"Array must be of the length at least 1\00" +@12 = internal constant [22 x i8] c"Index is out of bound\00" +@PartialApplication__27__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__27__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Arrays___4cd8b89fe06d48f482ef5ccfcb618894_ElementAt__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Arrays___4cd8b89fe06d48f482ef5ccfcb618894_ElementAt__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__16__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__16__RefCount, void (%Tuple*, i32)* @MemoryManagement__16__AliasCount] +@13 = internal constant [36 x i8] c"Qubit in invalid state. 
Expecting: \00" +@14 = internal constant [2 x i8] c"\22\00" +@15 = internal constant [13 x i8] c"\0A\09Expected:\09\00" +@16 = internal constant [5 x i8] c"true\00" +@17 = internal constant [6 x i8] c"false\00" +@18 = internal constant [11 x i8] c"\0A\09Actual:\09\00" +@19 = internal constant [33 x i8] c"`bits` must be between 0 and 63 \00" +@20 = internal constant [34 x i8] c"`number` must be between 0 and 2^\00" +@21 = internal constant [15 x i8] c" - 1, but was \00" +@PartialApplication__28__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__ctladj__wrapper] +@Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____ctladj__wrapper] +@MemoryManagement__17__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__17__RefCount, void (%Tuple*, i32)* @MemoryManagement__17__AliasCount] +@PartialApplication__29__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__ctladj__wrapper] +@PartialApplication__30__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__ctladj__wrapper] +@Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____ctladj__wrapper] +@MemoryManagement__18__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__18__RefCount, void (%Tuple*, i32)* @MemoryManagement__18__AliasCount] +@PartialApplication__31__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__31__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__31__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__31__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__31__ctladj__wrapper] +@Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____ctladj__wrapper] +@MemoryManagement__19__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__19__RefCount, void (%Tuple*, i32)* @MemoryManagement__19__AliasCount] +@PartialApplication__32__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__32__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Math__ComplexPolar__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Math__ComplexPolar__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__20__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__20__RefCount, void (%Tuple*, i32)* @MemoryManagement__20__AliasCount] +@Microsoft__Quantum__Math__AbsD__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Math__AbsD__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__33__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__33__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__34__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__34__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__35__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__35__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@22 = internal constant [46 x i8] c"Unitary coupled-cluster PQRS failed: indices \00" +@23 = internal constant [3 x i8] c", \00" +@24 = internal constant [18 x i8] c" must be distinct\00" +@25 = internal constant [44 x i8] c"Unitary coupled-cluster PQ failed: indices \00" +@PartialApplication__36__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* 
@Lifted__PartialApplication__36__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__36__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__36__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__36__ctladj__wrapper] +@Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____ctladj__wrapper] +@MemoryManagement__21__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__21__RefCount, void (%Tuple*, i32)* @MemoryManagement__21__AliasCount] +@PartialApplication__37__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__37__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__37__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__37__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__37__ctladj__wrapper] +@Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctladj__wrapper] +@MemoryManagement__22__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__22__RefCount, void (%Tuple*, i32)* @MemoryManagement__22__AliasCount] +@26 = internal constant [86 x i8] c"ComputeJordanWignerString failed. `idxFermions` must contain an even number of terms.\00" +@27 = internal constant [46 x i8] c"ComputeJordanWignerString failed. 
fermionIdx \00" +@28 = internal constant [15 x i8] c" out of range.\00" +@29 = internal constant [47 x i8] c"Completely invalid cluster operator specified.\00" +@Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorFunction____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorFunction____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__38__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__38__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Chemistry__JordanWigner____QsRef3__JordanWignerStateAsGeneratorIndex____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3__JordanWignerStateAsGeneratorIndex____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__23__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__23__RefCount, void (%Tuple*, i32)* @MemoryManagement__23__AliasCount] +@Microsoft__Quantum__Intrinsic__X__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper] +@Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___PrepareSingleConfigurationalStateSingleSiteOccupation____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___PrepareSingleConfigurationalStateSingleSiteOccupation____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__ctladj__wrapper] +@PartialApplication__39__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__39__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* 
@Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__24__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__24__RefCount, void (%Tuple*, i32)* @MemoryManagement__24__AliasCount] +@PartialApplication__40__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__40__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__40__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__40__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__40__ctladj__wrapper] +@MemoryManagement__25__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__25__RefCount, void (%Tuple*, i32)* @MemoryManagement__25__AliasCount] +@PartialApplication__41__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__41__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__41__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__26__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__26__RefCount, void (%Tuple*, i32)* @MemoryManagement__26__AliasCount] +@PartialApplication__42__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__42__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Intrinsic__Measure__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Measure__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__27__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__27__RefCount, void (%Tuple*, i32)* @MemoryManagement__27__AliasCount] +@PartialApplication__43__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__43__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Chemistry__HTermsToGenIdx__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Chemistry__HTermsToGenIdx__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] 
+@MemoryManagement__28__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__28__RefCount, void (%Tuple*, i32)* @MemoryManagement__28__AliasCount] + +define double @Microsoft__Quantum__Chemistry__VQE__GetEnergyVQE__body({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedData, double %theta1, double %theta2, double %theta3, i64 %nSamples) { +entry: + %0 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedData, i32 0, i32 1 + %fermionTermData = load { %Array*, %Array*, %Array*, %Array* }*, { %Array*, %Array*, %Array*, %Array* }** %0, align 8 + %1 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %2) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %5) + %8 = bitcast i8* %7 to { %Array*, %Array* }** + %9 = load { %Array*, %Array* }*, { %Array*, %Array* }** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array*, %Array* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call i64 @__quantum__rt__array_get_size_1d(%Array* %17) + %19 = sub i64 %18, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %20) + %23 = bitcast i8* %22 to { %Array*, %Array* }** + %24 = load { %Array*, %Array* }*, { %Array*, %Array* }** %23, align 8 + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { %Array*, %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %20, 1 + br label %header__2 + 
+exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %31 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 2 + %32 = load %Array*, %Array** %31, align 8 + %33 = call i64 @__quantum__rt__array_get_size_1d(%Array* %32) + %34 = sub i64 %33, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %45, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %35) + %38 = bitcast i8* %37 to { %Array*, %Array* }** + %39 = load { %Array*, %Array* }*, { %Array*, %Array* }** %38, align 8 + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 0 + %41 = load %Array*, %Array** %40, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1) + %42 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 1 + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + %44 = bitcast { %Array*, %Array* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %44, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %45 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %46 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, i32 0, i32 3 + %47 = load %Array*, %Array** %46, align 8 + %48 = call i64 @__quantum__rt__array_get_size_1d(%Array* %47) + %49 = sub i64 %48, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %50 = phi i64 [ 0, %exit__3 ], [ %60, %exiting__4 ] + %51 = icmp sle i64 %50, %49 + br i1 %51, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %50) + %53 = bitcast i8* %52 to { %Array*, %Array* }** + %54 = load { %Array*, %Array* }*, { %Array*, %Array* }** %53, align 8 + %55 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 0 + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + %57 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 1 + %58 = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 1) + %59 = bitcast { %Array*, %Array* }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %59, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %60 = add i64 %50, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + %61 = bitcast { %Array*, %Array*, %Array*, %Array* }* %fermionTermData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %62 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedData, i32 0, i32 2 + %inputState = load { i64, %Array* }*, { i64, %Array* }** %62, align 8 + %63 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputState, i32 0, i32 
1 + %JWInputStates = load %Array*, %Array** %63, align 8 + %64 = call i64 @__quantum__rt__array_get_size_1d(%Array* %JWInputStates) + %65 = sub i64 %64, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %66 = phi i64 [ 0, %exit__4 ], [ %77, %exiting__5 ] + %67 = icmp sle i64 %66, %65 + br i1 %67, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 %66) + %69 = bitcast i8* %68 to { { double, double }*, %Array* }** + %70 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %69, align 8 + %71 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %70, i32 0, i32 0 + %72 = load { double, double }*, { double, double }** %71, align 8 + %73 = bitcast { double, double }* %72 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %73, i32 1) + %74 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %70, i32 0, i32 1 + %75 = load %Array*, %Array** %74, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %75, i32 1) + %76 = bitcast { { double, double }*, %Array* }* %70 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %76, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %77 = add i64 %66, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %JWInputStates, i32 1) + %78 = bitcast { i64, %Array* }* %inputState to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 1) + %79 = bitcast { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + %80 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedData, i32 0, i32 0 + %nSpinOrbitals = load i64, i64* %80, align 4 + %81 = sub i64 %3, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %82 = phi i64 [ 0, %exit__5 ], [ %92, %exiting__6 ] + %83 = icmp sle i64 %82, %81 + br i1 %83, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %82) + %85 = bitcast i8* %84 to { %Array*, %Array* }** + %86 = load { %Array*, %Array* }*, { %Array*, %Array* }** %85, align 8 + %87 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %86, i32 0, i32 0 + %88 = load %Array*, %Array** %87, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %88, i32 1) + %89 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %86, i32 0, i32 1 + %90 = load %Array*, %Array** %89, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %90, i32 1) + %91 = bitcast { %Array*, %Array* }* %86 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %91, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %92 = add i64 %82, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %93 = sub i64 %18, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %94 = phi i64 [ 0, %exit__6 ], [ %104, %exiting__7 ] + %95 = icmp sle i64 %94, %93 + br i1 %95, label %body__7, label %exit__7 + +body__7: ; 
preds = %header__7 + %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %94) + %97 = bitcast i8* %96 to { %Array*, %Array* }** + %98 = load { %Array*, %Array* }*, { %Array*, %Array* }** %97, align 8 + %99 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %98, i32 0, i32 0 + %100 = load %Array*, %Array** %99, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %100, i32 1) + %101 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %98, i32 0, i32 1 + %102 = load %Array*, %Array** %101, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %102, i32 1) + %103 = bitcast { %Array*, %Array* }* %98 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %103, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %104 = add i64 %94, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %105 = sub i64 %33, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %106 = phi i64 [ 0, %exit__7 ], [ %116, %exiting__8 ] + %107 = icmp sle i64 %106, %105 + br i1 %107, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %106) + %109 = bitcast i8* %108 to { %Array*, %Array* }** + %110 = load { %Array*, %Array* }*, { %Array*, %Array* }** %109, align 8 + %111 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %110, i32 0, i32 0 + %112 = load %Array*, %Array** %111, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %112, i32 1) + %113 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %110, i32 0, i32 1 + %114 = load %Array*, %Array** %113, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %114, i32 1) + %115 = bitcast { %Array*, %Array* }* %110 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %115, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %116 = add i64 %106, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %117 = sub i64 %48, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %118 = phi i64 [ 0, %exit__8 ], [ %128, %exiting__9 ] + %119 = icmp sle i64 %118, %117 + br i1 %119, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %120 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %118) + %121 = bitcast i8* %120 to { %Array*, %Array* }** + %122 = load { %Array*, %Array* }*, { %Array*, %Array* }** %121, align 8 + %123 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %122, i32 0, i32 0 + %124 = load %Array*, %Array** %123, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %124, i32 1) + %125 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %122, i32 0, i32 1 + %126 = load %Array*, %Array** %125, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %126, i32 1) + %127 = bitcast { %Array*, %Array* }* %122 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %127, i32 1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %128 = add i64 %118, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %129 = sub i64 %64, 1 + 
br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %130 = phi i64 [ 0, %exit__9 ], [ %141, %exiting__10 ] + %131 = icmp sle i64 %130, %129 + br i1 %131, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %132 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 %130) + %133 = bitcast i8* %132 to { { double, double }*, %Array* }** + %134 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %133, align 8 + %135 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %134, i32 0, i32 0 + %136 = load { double, double }*, { double, double }** %135, align 8 + %137 = bitcast { double, double }* %136 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %137, i32 1) + %138 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %134, i32 0, i32 1 + %139 = load %Array*, %Array** %138, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %139, i32 1) + %140 = bitcast { { double, double }*, %Array* }* %134 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %140, i32 1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %141 = add i64 %130, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %JWInputStates, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 1) + %142 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedData, i32 0, i32 3 + %energyOffset = load double, double* %142, align 8 + %143 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputState, i32 0, i32 0 + %stateType = load i64, i64* %143, align 4 + %144 = sub i64 %64, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %145 = phi i64 [ 0, %exit__10 ], [ %156, %exiting__11 ] + %146 = icmp sle i64 %145, %144 + br i1 %146, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %147 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 %145) + %148 = bitcast i8* %147 to { { double, double }*, %Array* }** + %149 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %148, align 8 + %150 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %149, i32 0, i32 0 + %151 = load { double, double }*, { double, double }** %150, align 8 + %152 = bitcast { double, double }* %151 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %152, i32 1) + %153 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %149, i32 0, i32 1 + %154 = load %Array*, %Array** %153, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %154, i32 1) + %155 = bitcast { { double, double }*, %Array* }* %149 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %155, i32 1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %156 = add i64 %145, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %JWInputStates, i32 1) + %157 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %158 = bitcast %Tuple* %157 to { double, 
double }* + %159 = getelementptr inbounds { double, double }, { double, double }* %158, i32 0, i32 0 + %160 = getelementptr inbounds { double, double }, { double, double }* %158, i32 0, i32 1 + store double %theta1, double* %159, align 8 + store double 0.000000e+00, double* %160, align 8 + %161 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %162 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %161, i64 0) + %163 = bitcast i8* %162 to i64* + %164 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %161, i64 1) + %165 = bitcast i8* %164 to i64* + store i64 2, i64* %163, align 4 + store i64 0, i64* %165, align 4 + %166 = call { { double, double }*, %Array* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerInputState__body({ double, double }* %158, %Array* %161) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %157, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %161, i32 -1) + %167 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %168 = bitcast %Tuple* %167 to { double, double }* + %169 = getelementptr inbounds { double, double }, { double, double }* %168, i32 0, i32 0 + %170 = getelementptr inbounds { double, double }, { double, double }* %168, i32 0, i32 1 + store double %theta2, double* %169, align 8 + store double 0.000000e+00, double* %170, align 8 + %171 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %172 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %171, i64 0) + %173 = bitcast i8* %172 to i64* + %174 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %171, i64 1) + %175 = bitcast i8* %174 to i64* + store i64 3, i64* %173, align 4 + store i64 1, i64* %175, align 4 + %176 = call { { double, double }*, %Array* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerInputState__body({ double, double }* %168, %Array* %171) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %167, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %171, i32 -1) + %177 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %178 = bitcast %Tuple* %177 to { double, double }* + %179 = getelementptr inbounds { double, double }, { double, double }* %178, i32 0, i32 0 + %180 = getelementptr inbounds { double, double }, { double, double }* %178, i32 0, i32 1 + store double %theta3, double* %179, align 8 + store double 0.000000e+00, double* %180, align 8 + %181 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %182 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %181, i64 0) + %183 = bitcast i8* %182 to i64* + %184 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %181, i64 1) + %185 = bitcast i8* %184 to i64* + %186 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %181, i64 2) + %187 = bitcast i8* %186 to i64* + %188 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %181, i64 3) + %189 = bitcast i8* %188 to i64* + store i64 2, i64* %183, align 4 + store i64 3, i64* %185, align 4 + store i64 1, i64* %187, align 4 + store i64 0, i64* %189, align 4 + %190 = call { { double, double }*, %Array* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerInputState__body({ double, double }* %178, %Array* %181) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %177, i32 -1) + 
call void @__quantum__rt__array_update_reference_count(%Array* %181, i32 -1) + %191 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 0) + %192 = bitcast i8* %191 to { { double, double }*, %Array* }** + %193 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %192, align 8 + %194 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %193, i32 0, i32 0 + %195 = load { double, double }*, { double, double }** %194, align 8 + %196 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %193, i32 0, i32 1 + %197 = load %Array*, %Array** %196, align 8 + %198 = bitcast { double, double }* %195 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %198, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %197, i32 1) + %199 = bitcast { { double, double }*, %Array* }* %193 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %199, i32 1) + %200 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %201 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 0) + %202 = bitcast i8* %201 to { { double, double }*, %Array* }** + %203 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 1) + %204 = bitcast i8* %203 to { { double, double }*, %Array* }** + %205 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 2) + %206 = bitcast i8* %205 to { { double, double }*, %Array* }** + %207 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 3) + %208 = bitcast i8* %207 to { { double, double }*, %Array* }** + store { { double, double }*, %Array* }* %166, { { double, double }*, %Array* }** %202, align 8 + store { { double, double }*, %Array* }* %176, { { double, double }*, %Array* }** %204, align 8 + store { { double, double }*, %Array* }* %190, { { double, double }*, %Array* }** %206, align 8 + store { { double, double }*, %Array* }* %193, { { double, double }*, %Array* }** %208, align 8 + %209 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %inputStateParam = bitcast %Tuple* %209 to { i64, %Array* }* + %210 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputStateParam, i32 0, i32 0 + %211 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputStateParam, i32 0, i32 1 + store i64 %stateType, i64* %210, align 4 + store %Array* %200, %Array** %211, align 8 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %212 = phi i64 [ 0, %exit__11 ], [ %223, %exiting__12 ] + %213 = icmp sle i64 %212, 3 + br i1 %213, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 %212) + %215 = bitcast i8* %214 to { { double, double }*, %Array* }** + %216 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %215, align 8 + %217 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %216, i32 0, i32 0 + %218 = load { double, double }*, { double, double }** %217, align 8 + %219 = bitcast { double, double }* %218 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %219, i32 1) + %220 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %216, i32 0, i32 1 + %221 = load %Array*, %Array** %220, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %221, i32 1) + %222 = bitcast { { double, double }*, %Array* }* %216 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %222, i32 1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %223 = add i64 %212, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %200, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %209, i32 1) + %JWEncodedDataParam = call { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerEncodingData__body(i64 %nSpinOrbitals, { %Array*, %Array*, %Array*, %Array* }* %fermionTermData, { i64, %Array* }* %inputState, double %energyOffset) + %224 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedDataParam, i32 0, i32 1 + %225 = load { %Array*, %Array*, %Array*, %Array* }*, { %Array*, %Array*, %Array*, %Array* }** %224, align 8 + %226 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %225, i32 0, i32 0 + %227 = load %Array*, %Array** %226, align 8 + %228 = call i64 @__quantum__rt__array_get_size_1d(%Array* %227) + %229 = sub i64 %228, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %230 = phi i64 [ 0, %exit__12 ], [ %240, %exiting__13 ] + %231 = icmp sle i64 %230, %229 + br i1 %231, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %232 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %227, i64 %230) + %233 = bitcast i8* %232 to { %Array*, %Array* }** + %234 = load { %Array*, %Array* }*, { %Array*, %Array* }** %233, align 8 + %235 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %234, i32 0, i32 0 + %236 = load %Array*, %Array** %235, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %236, i32 1) + %237 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %234, i32 0, i32 1 + %238 = load %Array*, %Array** %237, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %238, i32 1) + %239 = bitcast { %Array*, %Array* }* %234 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %239, i32 1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %240 = add i64 %230, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_alias_count(%Array* %227, i32 1) + %241 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %225, i32 0, i32 1 + %242 = load %Array*, %Array** %241, align 8 + %243 = call i64 @__quantum__rt__array_get_size_1d(%Array* %242) + %244 = sub i64 %243, 1 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %245 = phi i64 [ 0, %exit__13 ], [ %255, %exiting__14 ] + %246 = icmp sle i64 %245, %244 + br i1 %246, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %247 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %242, i64 %245) + %248 = bitcast i8* %247 to { %Array*, %Array* }** + %249 = load { %Array*, %Array* }*, { %Array*, %Array* }** %248, align 8 + %250 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %249, i32 0, i32 0 + %251 = load %Array*, %Array** %250, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %251, i32 1) + %252 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %249, i32 0, i32 1 + %253 = load %Array*, %Array** %252, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %253, i32 1) + %254 = bitcast { %Array*, %Array* }* %249 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %254, i32 1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %255 = add i64 %245, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_alias_count(%Array* %242, i32 1) + %256 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %225, i32 0, i32 2 + %257 = load %Array*, %Array** %256, align 8 + %258 = call i64 @__quantum__rt__array_get_size_1d(%Array* %257) + %259 = sub i64 %258, 1 + br label %header__15 + +header__15: ; preds = %exiting__15, %exit__14 + %260 = phi i64 [ 0, %exit__14 ], [ %270, %exiting__15 ] + %261 = icmp sle i64 %260, %259 + br i1 %261, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %262 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %257, i64 %260) + %263 = bitcast i8* %262 to { %Array*, %Array* }** + %264 = load { %Array*, %Array* }*, { %Array*, %Array* }** %263, align 8 + %265 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %264, i32 0, i32 0 + %266 = load %Array*, %Array** %265, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %266, i32 1) + %267 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %264, i32 0, i32 1 + %268 = load %Array*, %Array** %267, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %268, i32 1) + %269 = bitcast { %Array*, %Array* }* %264 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %269, i32 1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %270 = add i64 %260, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_alias_count(%Array* %257, i32 1) + %271 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %225, i32 0, i32 3 + %272 = load %Array*, %Array** %271, align 8 + %273 = call i64 @__quantum__rt__array_get_size_1d(%Array* %272) + %274 = sub i64 %273, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %275 = phi i64 [ 0, %exit__15 ], [ %285, %exiting__16 ] + %276 = icmp sle i64 %275, %274 + br i1 %276, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %277 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %272, i64 %275) + %278 = bitcast i8* %277 to { %Array*, %Array* }** + %279 = load { %Array*, %Array* }*, { %Array*, %Array* }** %278, align 8 + %280 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %279, i32 0, i32 0 + %281 = load %Array*, %Array** %280, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %281, i32 1) + %282 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %279, i32 0, i32 1 + %283 = load %Array*, %Array** %282, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %283, i32 1) + %284 = bitcast { %Array*, %Array* }* %279 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %284, i32 1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %285 = add i64 %275, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call 
void @__quantum__rt__array_update_alias_count(%Array* %272, i32 1) + %286 = bitcast { %Array*, %Array*, %Array*, %Array* }* %225 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %286, i32 1) + %287 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedDataParam, i32 0, i32 2 + %288 = load { i64, %Array* }*, { i64, %Array* }** %287, align 8 + %289 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %288, i32 0, i32 1 + %290 = load %Array*, %Array** %289, align 8 + %291 = call i64 @__quantum__rt__array_get_size_1d(%Array* %290) + %292 = sub i64 %291, 1 + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %293 = phi i64 [ 0, %exit__16 ], [ %304, %exiting__17 ] + %294 = icmp sle i64 %293, %292 + br i1 %294, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %295 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %290, i64 %293) + %296 = bitcast i8* %295 to { { double, double }*, %Array* }** + %297 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %296, align 8 + %298 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %297, i32 0, i32 0 + %299 = load { double, double }*, { double, double }** %298, align 8 + %300 = bitcast { double, double }* %299 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %300, i32 1) + %301 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %297, i32 0, i32 1 + %302 = load %Array*, %Array** %301, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %302, i32 1) + %303 = bitcast { { double, double }*, %Array* }* %297 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %303, i32 1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %304 = add i64 %293, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %290, i32 1) + %305 = bitcast { i64, %Array* }* %288 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %305, i32 1) + %306 = bitcast { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedDataParam to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %306, i32 1) + %307 = call double @Microsoft__Quantum__Chemistry__JordanWigner__VQE__EstimateEnergy__body({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %JWEncodedDataParam, i64 %nSamples) + %308 = sub i64 %3, 1 + br label %header__18 + +header__18: ; preds = %exiting__18, %exit__17 + %309 = phi i64 [ 0, %exit__17 ], [ %319, %exiting__18 ] + %310 = icmp sle i64 %309, %308 + br i1 %310, label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %311 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %309) + %312 = bitcast i8* %311 to { %Array*, %Array* }** + %313 = load { %Array*, %Array* }*, { %Array*, %Array* }** %312, align 8 + %314 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %313, i32 0, i32 0 + %315 = load %Array*, %Array** %314, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %315, i32 -1) + %316 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %313, i32 0, i32 1 + %317 = load %Array*, %Array** %316, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* 
%317, i32 -1) + %318 = bitcast { %Array*, %Array* }* %313 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %318, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = %body__18 + %319 = add i64 %309, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %320 = sub i64 %18, 1 + br label %header__19 + +header__19: ; preds = %exiting__19, %exit__18 + %321 = phi i64 [ 0, %exit__18 ], [ %331, %exiting__19 ] + %322 = icmp sle i64 %321, %320 + br i1 %322, label %body__19, label %exit__19 + +body__19: ; preds = %header__19 + %323 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %321) + %324 = bitcast i8* %323 to { %Array*, %Array* }** + %325 = load { %Array*, %Array* }*, { %Array*, %Array* }** %324, align 8 + %326 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %325, i32 0, i32 0 + %327 = load %Array*, %Array** %326, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %327, i32 -1) + %328 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %325, i32 0, i32 1 + %329 = load %Array*, %Array** %328, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %329, i32 -1) + %330 = bitcast { %Array*, %Array* }* %325 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %330, i32 -1) + br label %exiting__19 + +exiting__19: ; preds = %body__19 + %331 = add i64 %321, 1 + br label %header__19 + +exit__19: ; preds = %header__19 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %332 = sub i64 %33, 1 + br label %header__20 + +header__20: ; preds = %exiting__20, %exit__19 + %333 = phi i64 [ 0, %exit__19 ], [ %343, %exiting__20 ] + %334 = icmp sle i64 %333, %332 + br i1 %334, label %body__20, label %exit__20 + +body__20: ; preds = %header__20 + %335 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %333) + %336 = bitcast i8* %335 to { %Array*, %Array* }** + %337 = load { %Array*, %Array* }*, { %Array*, %Array* }** %336, align 8 + %338 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %337, i32 0, i32 0 + %339 = load %Array*, %Array** %338, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %339, i32 -1) + %340 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %337, i32 0, i32 1 + %341 = load %Array*, %Array** %340, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %341, i32 -1) + %342 = bitcast { %Array*, %Array* }* %337 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %342, i32 -1) + br label %exiting__20 + +exiting__20: ; preds = %body__20 + %343 = add i64 %333, 1 + br label %header__20 + +exit__20: ; preds = %header__20 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %344 = sub i64 %48, 1 + br label %header__21 + +header__21: ; preds = %exiting__21, %exit__20 + %345 = phi i64 [ 0, %exit__20 ], [ %355, %exiting__21 ] + %346 = icmp sle i64 %345, %344 + br i1 %346, label %body__21, label %exit__21 + +body__21: ; preds = %header__21 + %347 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %345) + %348 = bitcast i8* %347 to { %Array*, %Array* }** + %349 = load { %Array*, %Array* }*, { %Array*, %Array* }** %348, align 8 + %350 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %349, i32 0, i32 0 + %351 = load %Array*, %Array** %350, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* 
%351, i32 -1) + %352 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %349, i32 0, i32 1 + %353 = load %Array*, %Array** %352, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %353, i32 -1) + %354 = bitcast { %Array*, %Array* }* %349 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %354, i32 -1) + br label %exiting__21 + +exiting__21: ; preds = %body__21 + %355 = add i64 %345, 1 + br label %header__21 + +exit__21: ; preds = %header__21 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %356 = sub i64 %64, 1 + br label %header__22 + +header__22: ; preds = %exiting__22, %exit__21 + %357 = phi i64 [ 0, %exit__21 ], [ %368, %exiting__22 ] + %358 = icmp sle i64 %357, %356 + br i1 %358, label %body__22, label %exit__22 + +body__22: ; preds = %header__22 + %359 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 %357) + %360 = bitcast i8* %359 to { { double, double }*, %Array* }** + %361 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %360, align 8 + %362 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %361, i32 0, i32 0 + %363 = load { double, double }*, { double, double }** %362, align 8 + %364 = bitcast { double, double }* %363 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %364, i32 -1) + %365 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %361, i32 0, i32 1 + %366 = load %Array*, %Array** %365, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %366, i32 -1) + %367 = bitcast { { double, double }*, %Array* }* %361 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %367, i32 -1) + br label %exiting__22 + +exiting__22: ; preds = %body__22 + %368 = add i64 %357, 1 + br label %header__22 + +exit__22: ; preds = %header__22 + call void @__quantum__rt__array_update_alias_count(%Array* %JWInputStates, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 -1) + %369 = sub i64 %3, 1 + br label %header__23 + +header__23: ; preds = %exiting__23, %exit__22 + %370 = phi i64 [ 0, %exit__22 ], [ %380, %exiting__23 ] + %371 = icmp sle i64 %370, %369 + br i1 %371, label %body__23, label %exit__23 + +body__23: ; preds = %header__23 + %372 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %370) + %373 = bitcast i8* %372 to { %Array*, %Array* }** + %374 = load { %Array*, %Array* }*, { %Array*, %Array* }** %373, align 8 + %375 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %374, i32 0, i32 0 + %376 = load %Array*, %Array** %375, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %376, i32 -1) + %377 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %374, i32 0, i32 1 + %378 = load %Array*, %Array** %377, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %378, i32 -1) + %379 = bitcast { %Array*, %Array* }* %374 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %379, i32 -1) + br label %exiting__23 + +exiting__23: ; preds = %body__23 + %380 = add i64 %370, 1 + br label %header__23 + +exit__23: ; preds = %header__23 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %381 = sub i64 %18, 1 + br label %header__24 + 
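+; NOTE: header__24 through exit__28 below repeat the alias-count release over
+; the remaining Hamiltonian term arrays (%17, %32, %47) and %JWInputStates,
+; mirroring the +1 updates applied before the call that produced %307.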
+header__24: ; preds = %exiting__24, %exit__23 + %382 = phi i64 [ 0, %exit__23 ], [ %392, %exiting__24 ] + %383 = icmp sle i64 %382, %381 + br i1 %383, label %body__24, label %exit__24 + +body__24: ; preds = %header__24 + %384 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %382) + %385 = bitcast i8* %384 to { %Array*, %Array* }** + %386 = load { %Array*, %Array* }*, { %Array*, %Array* }** %385, align 8 + %387 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %386, i32 0, i32 0 + %388 = load %Array*, %Array** %387, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %388, i32 -1) + %389 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %386, i32 0, i32 1 + %390 = load %Array*, %Array** %389, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %390, i32 -1) + %391 = bitcast { %Array*, %Array* }* %386 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %391, i32 -1) + br label %exiting__24 + +exiting__24: ; preds = %body__24 + %392 = add i64 %382, 1 + br label %header__24 + +exit__24: ; preds = %header__24 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %393 = sub i64 %33, 1 + br label %header__25 + +header__25: ; preds = %exiting__25, %exit__24 + %394 = phi i64 [ 0, %exit__24 ], [ %404, %exiting__25 ] + %395 = icmp sle i64 %394, %393 + br i1 %395, label %body__25, label %exit__25 + +body__25: ; preds = %header__25 + %396 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %394) + %397 = bitcast i8* %396 to { %Array*, %Array* }** + %398 = load { %Array*, %Array* }*, { %Array*, %Array* }** %397, align 8 + %399 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %398, i32 0, i32 0 + %400 = load %Array*, %Array** %399, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %400, i32 -1) + %401 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %398, i32 0, i32 1 + %402 = load %Array*, %Array** %401, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %402, i32 -1) + %403 = bitcast { %Array*, %Array* }* %398 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %403, i32 -1) + br label %exiting__25 + +exiting__25: ; preds = %body__25 + %404 = add i64 %394, 1 + br label %header__25 + +exit__25: ; preds = %header__25 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %405 = sub i64 %48, 1 + br label %header__26 + +header__26: ; preds = %exiting__26, %exit__25 + %406 = phi i64 [ 0, %exit__25 ], [ %416, %exiting__26 ] + %407 = icmp sle i64 %406, %405 + br i1 %407, label %body__26, label %exit__26 + +body__26: ; preds = %header__26 + %408 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %406) + %409 = bitcast i8* %408 to { %Array*, %Array* }** + %410 = load { %Array*, %Array* }*, { %Array*, %Array* }** %409, align 8 + %411 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %410, i32 0, i32 0 + %412 = load %Array*, %Array** %411, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %412, i32 -1) + %413 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %410, i32 0, i32 1 + %414 = load %Array*, %Array** %413, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %414, i32 -1) + %415 = bitcast { %Array*, %Array* }* %410 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %415, i32 -1) + br label %exiting__26 + +exiting__26: ; preds = 
%body__26 + %416 = add i64 %406, 1 + br label %header__26 + +exit__26: ; preds = %header__26 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %417 = sub i64 %64, 1 + br label %header__27 + +header__27: ; preds = %exiting__27, %exit__26 + %418 = phi i64 [ 0, %exit__26 ], [ %429, %exiting__27 ] + %419 = icmp sle i64 %418, %417 + br i1 %419, label %body__27, label %exit__27 + +body__27: ; preds = %header__27 + %420 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 %418) + %421 = bitcast i8* %420 to { { double, double }*, %Array* }** + %422 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %421, align 8 + %423 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %422, i32 0, i32 0 + %424 = load { double, double }*, { double, double }** %423, align 8 + %425 = bitcast { double, double }* %424 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %425, i32 -1) + %426 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %422, i32 0, i32 1 + %427 = load %Array*, %Array** %426, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %427, i32 -1) + %428 = bitcast { { double, double }*, %Array* }* %422 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %428, i32 -1) + br label %exiting__27 + +exiting__27: ; preds = %body__27 + %429 = add i64 %418, 1 + br label %header__27 + +exit__27: ; preds = %header__27 + call void @__quantum__rt__array_update_alias_count(%Array* %JWInputStates, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 -1) + %430 = sub i64 %64, 1 + br label %header__28 + +header__28: ; preds = %exiting__28, %exit__27 + %431 = phi i64 [ 0, %exit__27 ], [ %442, %exiting__28 ] + %432 = icmp sle i64 %431, %430 + br i1 %432, label %body__28, label %exit__28 + +body__28: ; preds = %header__28 + %433 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %JWInputStates, i64 %431) + %434 = bitcast i8* %433 to { { double, double }*, %Array* }** + %435 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %434, align 8 + %436 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %435, i32 0, i32 0 + %437 = load { double, double }*, { double, double }** %436, align 8 + %438 = bitcast { double, double }* %437 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %438, i32 -1) + %439 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %435, i32 0, i32 1 + %440 = load %Array*, %Array** %439, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %440, i32 -1) + %441 = bitcast { { double, double }*, %Array* }* %435 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %441, i32 -1) + br label %exiting__28 + +exiting__28: ; preds = %body__28 + %442 = add i64 %431, 1 + br label %header__28 + +exit__28: ; preds = %header__28 + call void @__quantum__rt__array_update_alias_count(%Array* %JWInputStates, i32 -1) + br label %header__29 + +header__29: ; preds = %exiting__29, %exit__28 + %443 = phi i64 [ 0, %exit__28 ], [ %454, %exiting__29 ] + %444 = icmp sle i64 %443, 3 + br i1 %444, label %body__29, label %exit__29 + +body__29: ; preds = %header__29 + %445 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 %443) + %446 = 
bitcast i8* %445 to { { double, double }*, %Array* }** + %447 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %446, align 8 + %448 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %447, i32 0, i32 0 + %449 = load { double, double }*, { double, double }** %448, align 8 + %450 = bitcast { double, double }* %449 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %450, i32 -1) + %451 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %447, i32 0, i32 1 + %452 = load %Array*, %Array** %451, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %452, i32 -1) + %453 = bitcast { { double, double }*, %Array* }* %447 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %453, i32 -1) + br label %exiting__29 + +exiting__29: ; preds = %body__29 + %454 = add i64 %443, 1 + br label %header__29 + +exit__29: ; preds = %header__29 + call void @__quantum__rt__array_update_alias_count(%Array* %200, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %209, i32 -1) + %455 = sub i64 %228, 1 + br label %header__30 + +header__30: ; preds = %exiting__30, %exit__29 + %456 = phi i64 [ 0, %exit__29 ], [ %466, %exiting__30 ] + %457 = icmp sle i64 %456, %455 + br i1 %457, label %body__30, label %exit__30 + +body__30: ; preds = %header__30 + %458 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %227, i64 %456) + %459 = bitcast i8* %458 to { %Array*, %Array* }** + %460 = load { %Array*, %Array* }*, { %Array*, %Array* }** %459, align 8 + %461 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %460, i32 0, i32 0 + %462 = load %Array*, %Array** %461, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %462, i32 -1) + %463 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %460, i32 0, i32 1 + %464 = load %Array*, %Array** %463, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %464, i32 -1) + %465 = bitcast { %Array*, %Array* }* %460 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %465, i32 -1) + br label %exiting__30 + +exiting__30: ; preds = %body__30 + %466 = add i64 %456, 1 + br label %header__30 + +exit__30: ; preds = %header__30 + call void @__quantum__rt__array_update_alias_count(%Array* %227, i32 -1) + %467 = sub i64 %243, 1 + br label %header__31 + +header__31: ; preds = %exiting__31, %exit__30 + %468 = phi i64 [ 0, %exit__30 ], [ %478, %exiting__31 ] + %469 = icmp sle i64 %468, %467 + br i1 %469, label %body__31, label %exit__31 + +body__31: ; preds = %header__31 + %470 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %242, i64 %468) + %471 = bitcast i8* %470 to { %Array*, %Array* }** + %472 = load { %Array*, %Array* }*, { %Array*, %Array* }** %471, align 8 + %473 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %472, i32 0, i32 0 + %474 = load %Array*, %Array** %473, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %474, i32 -1) + %475 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %472, i32 0, i32 1 + %476 = load %Array*, %Array** %475, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %476, i32 -1) + %477 = bitcast { %Array*, %Array* }* %472 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %477, i32 -1) + br label %exiting__31 + +exiting__31: ; preds = %body__31 + %478 = add i64 %468, 1 + br label %header__31 + 
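+; NOTE: exit__31 through exit__34 finish releasing the alias counts on the
+; components of the locally built JWEncodedDataParam tuple; header__35 through
+; exit__40 then release the matching reference counts so those temporaries can
+; be freed before the function returns %307.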
+exit__31: ; preds = %header__31 + call void @__quantum__rt__array_update_alias_count(%Array* %242, i32 -1) + %479 = sub i64 %258, 1 + br label %header__32 + +header__32: ; preds = %exiting__32, %exit__31 + %480 = phi i64 [ 0, %exit__31 ], [ %490, %exiting__32 ] + %481 = icmp sle i64 %480, %479 + br i1 %481, label %body__32, label %exit__32 + +body__32: ; preds = %header__32 + %482 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %257, i64 %480) + %483 = bitcast i8* %482 to { %Array*, %Array* }** + %484 = load { %Array*, %Array* }*, { %Array*, %Array* }** %483, align 8 + %485 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %484, i32 0, i32 0 + %486 = load %Array*, %Array** %485, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %486, i32 -1) + %487 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %484, i32 0, i32 1 + %488 = load %Array*, %Array** %487, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %488, i32 -1) + %489 = bitcast { %Array*, %Array* }* %484 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %489, i32 -1) + br label %exiting__32 + +exiting__32: ; preds = %body__32 + %490 = add i64 %480, 1 + br label %header__32 + +exit__32: ; preds = %header__32 + call void @__quantum__rt__array_update_alias_count(%Array* %257, i32 -1) + %491 = sub i64 %273, 1 + br label %header__33 + +header__33: ; preds = %exiting__33, %exit__32 + %492 = phi i64 [ 0, %exit__32 ], [ %502, %exiting__33 ] + %493 = icmp sle i64 %492, %491 + br i1 %493, label %body__33, label %exit__33 + +body__33: ; preds = %header__33 + %494 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %272, i64 %492) + %495 = bitcast i8* %494 to { %Array*, %Array* }** + %496 = load { %Array*, %Array* }*, { %Array*, %Array* }** %495, align 8 + %497 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %496, i32 0, i32 0 + %498 = load %Array*, %Array** %497, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %498, i32 -1) + %499 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %496, i32 0, i32 1 + %500 = load %Array*, %Array** %499, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %500, i32 -1) + %501 = bitcast { %Array*, %Array* }* %496 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %501, i32 -1) + br label %exiting__33 + +exiting__33: ; preds = %body__33 + %502 = add i64 %492, 1 + br label %header__33 + +exit__33: ; preds = %header__33 + call void @__quantum__rt__array_update_alias_count(%Array* %272, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %286, i32 -1) + %503 = sub i64 %291, 1 + br label %header__34 + +header__34: ; preds = %exiting__34, %exit__33 + %504 = phi i64 [ 0, %exit__33 ], [ %515, %exiting__34 ] + %505 = icmp sle i64 %504, %503 + br i1 %505, label %body__34, label %exit__34 + +body__34: ; preds = %header__34 + %506 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %290, i64 %504) + %507 = bitcast i8* %506 to { { double, double }*, %Array* }** + %508 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %507, align 8 + %509 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %508, i32 0, i32 0 + %510 = load { double, double }*, { double, double }** %509, align 8 + %511 = bitcast { double, double }* %510 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %511, i32 -1) + %512 = 
getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %508, i32 0, i32 1 + %513 = load %Array*, %Array** %512, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %513, i32 -1) + %514 = bitcast { { double, double }*, %Array* }* %508 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %514, i32 -1) + br label %exiting__34 + +exiting__34: ; preds = %body__34 + %515 = add i64 %504, 1 + br label %header__34 + +exit__34: ; preds = %header__34 + call void @__quantum__rt__array_update_alias_count(%Array* %290, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %305, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %306, i32 -1) + br label %header__35 + +header__35: ; preds = %exiting__35, %exit__34 + %516 = phi i64 [ 0, %exit__34 ], [ %527, %exiting__35 ] + %517 = icmp sle i64 %516, 3 + br i1 %517, label %body__35, label %exit__35 + +body__35: ; preds = %header__35 + %518 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 %516) + %519 = bitcast i8* %518 to { { double, double }*, %Array* }** + %520 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %519, align 8 + %521 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %520, i32 0, i32 0 + %522 = load { double, double }*, { double, double }** %521, align 8 + %523 = bitcast { double, double }* %522 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %523, i32 -1) + %524 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %520, i32 0, i32 1 + %525 = load %Array*, %Array** %524, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %525, i32 -1) + %526 = bitcast { { double, double }*, %Array* }* %520 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %526, i32 -1) + br label %exiting__35 + +exiting__35: ; preds = %body__35 + %527 = add i64 %516, 1 + br label %header__35 + +exit__35: ; preds = %header__35 + call void @__quantum__rt__array_update_reference_count(%Array* %200, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %209, i32 -1) + %528 = sub i64 %228, 1 + br label %header__36 + +header__36: ; preds = %exiting__36, %exit__35 + %529 = phi i64 [ 0, %exit__35 ], [ %539, %exiting__36 ] + %530 = icmp sle i64 %529, %528 + br i1 %530, label %body__36, label %exit__36 + +body__36: ; preds = %header__36 + %531 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %227, i64 %529) + %532 = bitcast i8* %531 to { %Array*, %Array* }** + %533 = load { %Array*, %Array* }*, { %Array*, %Array* }** %532, align 8 + %534 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %533, i32 0, i32 0 + %535 = load %Array*, %Array** %534, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %535, i32 -1) + %536 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %533, i32 0, i32 1 + %537 = load %Array*, %Array** %536, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %537, i32 -1) + %538 = bitcast { %Array*, %Array* }* %533 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %538, i32 -1) + br label %exiting__36 + +exiting__36: ; preds = %body__36 + %539 = add i64 %529, 1 + br label %header__36 + +exit__36: ; preds = %header__36 + call void @__quantum__rt__array_update_reference_count(%Array* %227, i32 -1) + %540 = sub i64 %243, 1 + br label 
%header__37 + +header__37: ; preds = %exiting__37, %exit__36 + %541 = phi i64 [ 0, %exit__36 ], [ %551, %exiting__37 ] + %542 = icmp sle i64 %541, %540 + br i1 %542, label %body__37, label %exit__37 + +body__37: ; preds = %header__37 + %543 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %242, i64 %541) + %544 = bitcast i8* %543 to { %Array*, %Array* }** + %545 = load { %Array*, %Array* }*, { %Array*, %Array* }** %544, align 8 + %546 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %545, i32 0, i32 0 + %547 = load %Array*, %Array** %546, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %547, i32 -1) + %548 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %545, i32 0, i32 1 + %549 = load %Array*, %Array** %548, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %549, i32 -1) + %550 = bitcast { %Array*, %Array* }* %545 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %550, i32 -1) + br label %exiting__37 + +exiting__37: ; preds = %body__37 + %551 = add i64 %541, 1 + br label %header__37 + +exit__37: ; preds = %header__37 + call void @__quantum__rt__array_update_reference_count(%Array* %242, i32 -1) + %552 = sub i64 %258, 1 + br label %header__38 + +header__38: ; preds = %exiting__38, %exit__37 + %553 = phi i64 [ 0, %exit__37 ], [ %563, %exiting__38 ] + %554 = icmp sle i64 %553, %552 + br i1 %554, label %body__38, label %exit__38 + +body__38: ; preds = %header__38 + %555 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %257, i64 %553) + %556 = bitcast i8* %555 to { %Array*, %Array* }** + %557 = load { %Array*, %Array* }*, { %Array*, %Array* }** %556, align 8 + %558 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %557, i32 0, i32 0 + %559 = load %Array*, %Array** %558, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %559, i32 -1) + %560 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %557, i32 0, i32 1 + %561 = load %Array*, %Array** %560, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %561, i32 -1) + %562 = bitcast { %Array*, %Array* }* %557 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %562, i32 -1) + br label %exiting__38 + +exiting__38: ; preds = %body__38 + %563 = add i64 %553, 1 + br label %header__38 + +exit__38: ; preds = %header__38 + call void @__quantum__rt__array_update_reference_count(%Array* %257, i32 -1) + %564 = sub i64 %273, 1 + br label %header__39 + +header__39: ; preds = %exiting__39, %exit__38 + %565 = phi i64 [ 0, %exit__38 ], [ %575, %exiting__39 ] + %566 = icmp sle i64 %565, %564 + br i1 %566, label %body__39, label %exit__39 + +body__39: ; preds = %header__39 + %567 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %272, i64 %565) + %568 = bitcast i8* %567 to { %Array*, %Array* }** + %569 = load { %Array*, %Array* }*, { %Array*, %Array* }** %568, align 8 + %570 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %569, i32 0, i32 0 + %571 = load %Array*, %Array** %570, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %571, i32 -1) + %572 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %569, i32 0, i32 1 + %573 = load %Array*, %Array** %572, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %573, i32 -1) + %574 = bitcast { %Array*, %Array* }* %569 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* 
%574, i32 -1) + br label %exiting__39 + +exiting__39: ; preds = %body__39 + %575 = add i64 %565, 1 + br label %header__39 + +exit__39: ; preds = %header__39 + call void @__quantum__rt__array_update_reference_count(%Array* %272, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %286, i32 -1) + %576 = sub i64 %291, 1 + br label %header__40 + +header__40: ; preds = %exiting__40, %exit__39 + %577 = phi i64 [ 0, %exit__39 ], [ %588, %exiting__40 ] + %578 = icmp sle i64 %577, %576 + br i1 %578, label %body__40, label %exit__40 + +body__40: ; preds = %header__40 + %579 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %290, i64 %577) + %580 = bitcast i8* %579 to { { double, double }*, %Array* }** + %581 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %580, align 8 + %582 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %581, i32 0, i32 0 + %583 = load { double, double }*, { double, double }** %582, align 8 + %584 = bitcast { double, double }* %583 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %584, i32 -1) + %585 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %581, i32 0, i32 1 + %586 = load %Array*, %Array** %585, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %586, i32 -1) + %587 = bitcast { { double, double }*, %Array* }* %581 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %587, i32 -1) + br label %exiting__40 + +exiting__40: ; preds = %body__40 + %588 = add i64 %577, 1 + br label %header__40 + +exit__40: ; preds = %header__40 + call void @__quantum__rt__array_update_reference_count(%Array* %290, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %305, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %306, i32 -1) + ret double %307 +} + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) + +declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) + +define internal { { double, double }*, %Array* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerInputState__body({ double, double }* %0, %Array* %__Item3__) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__Item3__, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }*, %Array* }* getelementptr ({ { double, double }*, %Array* }, { { double, double }*, %Array* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { { double, double }*, %Array* }* + %3 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %2, i32 0, i32 1 + store { double, double }* %0, { double, double }** %3, align 8 + store %Array* %__Item3__, %Array** %4, align 8 + %5 = bitcast { double, double }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__Item3__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__Item3__, i32 -1) + ret { { double, double }*, %Array* }* %2 +} + +declare %Tuple* @__quantum__rt__tuple_create(i64) + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare void 
@__quantum__rt__tuple_update_reference_count(%Tuple*, i32) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) + +define internal { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerEncodingData__body(i64 %__Item1__, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, { i64, %Array* }* %0, double %__Item5__) { +entry: + %1 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %2) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %5) + %8 = bitcast i8* %7 to { %Array*, %Array* }** + %9 = load { %Array*, %Array* }*, { %Array*, %Array* }** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array*, %Array* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call i64 @__quantum__rt__array_get_size_1d(%Array* %17) + %19 = sub i64 %18, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %20) + %23 = bitcast i8* %22 to { %Array*, %Array* }** + %24 = load { %Array*, %Array* }*, { %Array*, %Array* }** %23, align 8 + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { %Array*, %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %31 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 2 + %32 = load %Array*, %Array** %31, align 8 + %33 = call i64 @__quantum__rt__array_get_size_1d(%Array* %32) + %34 = 
sub i64 %33, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %45, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %35) + %38 = bitcast i8* %37 to { %Array*, %Array* }** + %39 = load { %Array*, %Array* }*, { %Array*, %Array* }** %38, align 8 + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 0 + %41 = load %Array*, %Array** %40, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1) + %42 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 1 + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + %44 = bitcast { %Array*, %Array* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %44, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %45 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %46 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 3 + %47 = load %Array*, %Array** %46, align 8 + %48 = call i64 @__quantum__rt__array_get_size_1d(%Array* %47) + %49 = sub i64 %48, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %50 = phi i64 [ 0, %exit__3 ], [ %60, %exiting__4 ] + %51 = icmp sle i64 %50, %49 + br i1 %51, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %50) + %53 = bitcast i8* %52 to { %Array*, %Array* }** + %54 = load { %Array*, %Array* }*, { %Array*, %Array* }** %53, align 8 + %55 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 0 + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + %57 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 1 + %58 = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 1) + %59 = bitcast { %Array*, %Array* }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %59, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %60 = add i64 %50, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + %61 = bitcast { %Array*, %Array*, %Array*, %Array* }* %__Item2__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %62 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* getelementptr ({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* null, i32 1) to i64)) + %63 = bitcast %Tuple* %62 to { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* + %64 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %63, i32 0, i32 0 + %65 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, 
{ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %63, i32 0, i32 1 + %66 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %63, i32 0, i32 2 + %67 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %63, i32 0, i32 3 + store i64 %__Item1__, i64* %64, align 4 + store { %Array*, %Array*, %Array*, %Array* }* %__Item2__, { %Array*, %Array*, %Array*, %Array* }** %65, align 8 + store { i64, %Array* }* %0, { i64, %Array* }** %66, align 8 + store double %__Item5__, double* %67, align 8 + %68 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 0 + %69 = load %Array*, %Array** %68, align 8 + %70 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 1 + %71 = load %Array*, %Array** %70, align 8 + %72 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 2 + %73 = load %Array*, %Array** %72, align 8 + %74 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %__Item2__, i32 0, i32 3 + %75 = load %Array*, %Array** %74, align 8 + %76 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %0, i32 0, i32 1 + %77 = load %Array*, %Array** %76, align 8 + %78 = call i64 @__quantum__rt__array_get_size_1d(%Array* %69) + %79 = sub i64 %78, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %80 = phi i64 [ 0, %exit__4 ], [ %90, %exiting__5 ] + %81 = icmp sle i64 %80, %79 + br i1 %81, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 %80) + %83 = bitcast i8* %82 to { %Array*, %Array* }** + %84 = load { %Array*, %Array* }*, { %Array*, %Array* }** %83, align 8 + %85 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %84, i32 0, i32 0 + %86 = load %Array*, %Array** %85, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %86, i32 1) + %87 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %84, i32 0, i32 1 + %88 = load %Array*, %Array** %87, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %88, i32 1) + %89 = bitcast { %Array*, %Array* }* %84 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %89, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %90 = add i64 %80, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 1) + %91 = call i64 @__quantum__rt__array_get_size_1d(%Array* %71) + %92 = sub i64 %91, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %93 = phi i64 [ 0, %exit__5 ], [ %103, %exiting__6 ] + %94 = icmp sle i64 %93, %92 + br i1 %94, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %95 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %71, i64 %93) + %96 = bitcast i8* %95 to { %Array*, %Array* }** + %97 = load { %Array*, %Array* }*, { %Array*, %Array* }** %96, align 8 + %98 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %97, i32 0, i32 0 + %99 = load %Array*, %Array** %98, 
align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %99, i32 1) + %100 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %97, i32 0, i32 1 + %101 = load %Array*, %Array** %100, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %101, i32 1) + %102 = bitcast { %Array*, %Array* }* %97 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %102, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %103 = add i64 %93, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %71, i32 1) + %104 = call i64 @__quantum__rt__array_get_size_1d(%Array* %73) + %105 = sub i64 %104, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %106 = phi i64 [ 0, %exit__6 ], [ %116, %exiting__7 ] + %107 = icmp sle i64 %106, %105 + br i1 %107, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 %106) + %109 = bitcast i8* %108 to { %Array*, %Array* }** + %110 = load { %Array*, %Array* }*, { %Array*, %Array* }** %109, align 8 + %111 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %110, i32 0, i32 0 + %112 = load %Array*, %Array** %111, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %112, i32 1) + %113 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %110, i32 0, i32 1 + %114 = load %Array*, %Array** %113, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %114, i32 1) + %115 = bitcast { %Array*, %Array* }* %110 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %115, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %116 = add i64 %106, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %73, i32 1) + %117 = call i64 @__quantum__rt__array_get_size_1d(%Array* %75) + %118 = sub i64 %117, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %119 = phi i64 [ 0, %exit__7 ], [ %129, %exiting__8 ] + %120 = icmp sle i64 %119, %118 + br i1 %120, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %75, i64 %119) + %122 = bitcast i8* %121 to { %Array*, %Array* }** + %123 = load { %Array*, %Array* }*, { %Array*, %Array* }** %122, align 8 + %124 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 0 + %125 = load %Array*, %Array** %124, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %125, i32 1) + %126 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 1 + %127 = load %Array*, %Array** %126, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %127, i32 1) + %128 = bitcast { %Array*, %Array* }* %123 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %128, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %129 = add i64 %119, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %75, i32 1) + %130 = bitcast { %Array*, %Array*, %Array*, %Array* }* %__Item2__ to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %130, i32 1) + %131 = call i64 @__quantum__rt__array_get_size_1d(%Array* %77) + %132 = sub i64 %131, 1 + br label %header__9 + 
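+; NOTE: header__9 below raises the reference counts of the entries of %77 (the
+; input-state array referenced by the new tuple %63, an ownership transfer),
+; after which header__10 through exit__13 drop the alias counts taken on the
+; arguments at entry and the constructor returns %63.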
+header__9: ; preds = %exiting__9, %exit__8 + %133 = phi i64 [ 0, %exit__8 ], [ %144, %exiting__9 ] + %134 = icmp sle i64 %133, %132 + br i1 %134, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %135 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 %133) + %136 = bitcast i8* %135 to { { double, double }*, %Array* }** + %137 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %136, align 8 + %138 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %137, i32 0, i32 0 + %139 = load { double, double }*, { double, double }** %138, align 8 + %140 = bitcast { double, double }* %139 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %140, i32 1) + %141 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %137, i32 0, i32 1 + %142 = load %Array*, %Array** %141, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %142, i32 1) + %143 = bitcast { { double, double }*, %Array* }* %137 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %143, i32 1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %144 = add i64 %133, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_reference_count(%Array* %77, i32 1) + %145 = bitcast { i64, %Array* }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %145, i32 1) + %146 = sub i64 %3, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %147 = phi i64 [ 0, %exit__9 ], [ %157, %exiting__10 ] + %148 = icmp sle i64 %147, %146 + br i1 %148, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %149 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %147) + %150 = bitcast i8* %149 to { %Array*, %Array* }** + %151 = load { %Array*, %Array* }*, { %Array*, %Array* }** %150, align 8 + %152 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %151, i32 0, i32 0 + %153 = load %Array*, %Array** %152, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %153, i32 -1) + %154 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %151, i32 0, i32 1 + %155 = load %Array*, %Array** %154, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %155, i32 -1) + %156 = bitcast { %Array*, %Array* }* %151 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %156, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %157 = add i64 %147, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %158 = sub i64 %18, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %159 = phi i64 [ 0, %exit__10 ], [ %169, %exiting__11 ] + %160 = icmp sle i64 %159, %158 + br i1 %160, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %161 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %159) + %162 = bitcast i8* %161 to { %Array*, %Array* }** + %163 = load { %Array*, %Array* }*, { %Array*, %Array* }** %162, align 8 + %164 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %163, i32 0, i32 0 + %165 = load %Array*, %Array** %164, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %165, i32 -1) + %166 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %163, i32 0, i32 1 + %167 = 
load %Array*, %Array** %166, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %167, i32 -1) + %168 = bitcast { %Array*, %Array* }* %163 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %168, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %169 = add i64 %159, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %170 = sub i64 %33, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %171 = phi i64 [ 0, %exit__11 ], [ %181, %exiting__12 ] + %172 = icmp sle i64 %171, %170 + br i1 %172, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %173 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %171) + %174 = bitcast i8* %173 to { %Array*, %Array* }** + %175 = load { %Array*, %Array* }*, { %Array*, %Array* }** %174, align 8 + %176 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %175, i32 0, i32 0 + %177 = load %Array*, %Array** %176, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %177, i32 -1) + %178 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %175, i32 0, i32 1 + %179 = load %Array*, %Array** %178, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %179, i32 -1) + %180 = bitcast { %Array*, %Array* }* %175 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %180, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %181 = add i64 %171, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %182 = sub i64 %48, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %183 = phi i64 [ 0, %exit__12 ], [ %193, %exiting__13 ] + %184 = icmp sle i64 %183, %182 + br i1 %184, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %185 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %183) + %186 = bitcast i8* %185 to { %Array*, %Array* }** + %187 = load { %Array*, %Array* }*, { %Array*, %Array* }** %186, align 8 + %188 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %187, i32 0, i32 0 + %189 = load %Array*, %Array** %188, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %189, i32 -1) + %190 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %187, i32 0, i32 1 + %191 = load %Array*, %Array** %190, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %191, i32 -1) + %192 = bitcast { %Array*, %Array* }* %187 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %192, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %193 = add i64 %183, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + ret { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %63 +} + +define internal double @Microsoft__Quantum__Chemistry__JordanWigner__VQE__EstimateEnergy__body({ i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %jwHamiltonian, i64 %nSamples) { +entry: + %energy = alloca double, align 8 + %0 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, 
%Array* }*, double }* %jwHamiltonian, i32 0, i32 1 + %jwTerms = load { %Array*, %Array*, %Array*, %Array* }*, { %Array*, %Array*, %Array*, %Array* }** %0, align 8 + %1 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %jwTerms, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %2) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %5) + %8 = bitcast i8* %7 to { %Array*, %Array* }** + %9 = load { %Array*, %Array* }*, { %Array*, %Array* }** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array*, %Array* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %jwTerms, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call i64 @__quantum__rt__array_get_size_1d(%Array* %17) + %19 = sub i64 %18, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %20) + %23 = bitcast i8* %22 to { %Array*, %Array* }** + %24 = load { %Array*, %Array* }*, { %Array*, %Array* }** %23, align 8 + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { %Array*, %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %31 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %jwTerms, i32 0, i32 2 + %32 = load %Array*, %Array** %31, align 8 + %33 = call i64 @__quantum__rt__array_get_size_1d(%Array* %32) + %34 = sub i64 %33, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %45, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %35) + %38 = bitcast i8* %37 to { %Array*, %Array* }** + %39 = load { %Array*, %Array* }*, { %Array*, %Array* }** %38, align 8 + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 0 + %41 = load %Array*, %Array** %40, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1) + %42 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 1 + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + %44 = bitcast { %Array*, %Array* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %44, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %45 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %46 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %jwTerms, i32 0, i32 3 + %47 = load %Array*, %Array** %46, align 8 + %48 = call i64 @__quantum__rt__array_get_size_1d(%Array* %47) + %49 = sub i64 %48, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %50 = phi i64 [ 0, %exit__3 ], [ %60, %exiting__4 ] + %51 = icmp sle i64 %50, %49 + br i1 %51, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %50) + %53 = bitcast i8* %52 to { %Array*, %Array* }** + %54 = load { %Array*, %Array* }*, { %Array*, %Array* }** %53, align 8 + %55 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 0 + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + %57 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %54, i32 0, i32 1 + %58 = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 1) + %59 = bitcast { %Array*, %Array* }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %59, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %60 = add i64 %50, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + %61 = bitcast { %Array*, %Array*, %Array*, %Array* }* %jwTerms to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %62 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %jwHamiltonian, i32 0, i32 2 + %inputState = load { i64, %Array* }*, { i64, %Array* }** %62, align 8 + %63 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputState, i32 0, i32 1 + %64 = load %Array*, %Array** %63, align 8 + %65 = call i64 @__quantum__rt__array_get_size_1d(%Array* %64) + %66 = sub i64 %65, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %67 = phi i64 [ 0, %exit__4 ], [ %78, %exiting__5 ] + %68 = icmp sle i64 %67, %66 + br i1 %68, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %67) + %70 = bitcast i8* %69 to { { double, double }*, %Array* }** + %71 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %70, align 8 + %72 = getelementptr inbounds { { 
double, double }*, %Array* }, { { double, double }*, %Array* }* %71, i32 0, i32 0 + %73 = load { double, double }*, { double, double }** %72, align 8 + %74 = bitcast { double, double }* %73 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %74, i32 1) + %75 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %71, i32 0, i32 1 + %76 = load %Array*, %Array** %75, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 1) + %77 = bitcast { { double, double }*, %Array* }* %71 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %77, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %78 = add i64 %67, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + %79 = bitcast { i64, %Array* }* %inputState to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + %80 = bitcast { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %jwHamiltonian to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 1) + store double 0.000000e+00, double* %energy, align 8 + %81 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %jwHamiltonian, i32 0, i32 0 + %nQubits = load i64, i64* %81, align 4 + %82 = sub i64 %3, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %83 = phi i64 [ 0, %exit__5 ], [ %93, %exiting__6 ] + %84 = icmp sle i64 %83, %82 + br i1 %84, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %83) + %86 = bitcast i8* %85 to { %Array*, %Array* }** + %87 = load { %Array*, %Array* }*, { %Array*, %Array* }** %86, align 8 + %88 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %87, i32 0, i32 0 + %89 = load %Array*, %Array** %88, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %89, i32 1) + %90 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %87, i32 0, i32 1 + %91 = load %Array*, %Array** %90, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %91, i32 1) + %92 = bitcast { %Array*, %Array* }* %87 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %92, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %93 = add i64 %83, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + %94 = sub i64 %18, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %95 = phi i64 [ 0, %exit__6 ], [ %105, %exiting__7 ] + %96 = icmp sle i64 %95, %94 + br i1 %96, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %95) + %98 = bitcast i8* %97 to { %Array*, %Array* }** + %99 = load { %Array*, %Array* }*, { %Array*, %Array* }** %98, align 8 + %100 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %99, i32 0, i32 0 + %101 = load %Array*, %Array** %100, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %101, i32 1) + %102 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %99, i32 0, i32 1 + %103 = load %Array*, %Array** %102, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %103, i32 1) + %104 = bitcast { %Array*, %Array* }* %99 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %104, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %105 = add i64 %95, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %106 = sub i64 %33, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %107 = phi i64 [ 0, %exit__7 ], [ %117, %exiting__8 ] + %108 = icmp sle i64 %107, %106 + br i1 %108, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %107) + %110 = bitcast i8* %109 to { %Array*, %Array* }** + %111 = load { %Array*, %Array* }*, { %Array*, %Array* }** %110, align 8 + %112 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %111, i32 0, i32 0 + %113 = load %Array*, %Array** %112, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %113, i32 1) + %114 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %111, i32 0, i32 1 + %115 = load %Array*, %Array** %114, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %115, i32 1) + %116 = bitcast { %Array*, %Array* }* %111 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %116, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %117 = add i64 %107, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + %118 = sub i64 %48, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %119 = phi i64 [ 0, %exit__8 ], [ %129, %exiting__9 ] + %120 = icmp sle i64 %119, %118 + br i1 %120, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %119) + %122 = bitcast i8* %121 to { %Array*, %Array* }** + %123 = load { %Array*, %Array* }*, { %Array*, %Array* }** %122, align 8 + %124 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 0 + %125 = load %Array*, %Array** %124, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %125, i32 1) + %126 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %123, i32 0, i32 1 + %127 = load %Array*, %Array** %126, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %127, i32 1) + %128 = bitcast { %Array*, %Array* }* %123 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %128, i32 1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %129 = add i64 %119, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 1) + %130 = sub i64 %65, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %131 = phi i64 [ 0, %exit__9 ], [ %142, %exiting__10 ] + %132 = icmp sle i64 %131, %130 + br i1 %132, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %131) + %134 = bitcast i8* %133 to { { double, double }*, %Array* }** + %135 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %134, align 8 + %136 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* 
%135, i32 0, i32 0 + %137 = load { double, double }*, { double, double }** %136, align 8 + %138 = bitcast { double, double }* %137 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %138, i32 1) + %139 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %135, i32 0, i32 1 + %140 = load %Array*, %Array** %139, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %140, i32 1) + %141 = bitcast { { double, double }*, %Array* }* %135 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %141, i32 1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %142 = add i64 %131, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + %143 = getelementptr inbounds { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }, { i64, { %Array*, %Array*, %Array*, %Array* }*, { i64, %Array* }*, double }* %jwHamiltonian, i32 0, i32 3 + %energyOffset = load double, double* %143, align 8 + %144 = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerGeneratorSystem__body({ %Array*, %Array*, %Array*, %Array* }* %jwTerms) + %145 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %144, i32 0, i32 0 + %nTerms = load i64, i64* %145, align 4 + %146 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %144, i32 0, i32 1 + %indexFunction = load %Callable*, %Callable** %146, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %indexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %indexFunction, i32 1) + %147 = sub i64 %nTerms, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %idxTerm = phi i64 [ 0, %exit__10 ], [ %166, %exiting__11 ] + %148 = icmp sle i64 %idxTerm, %147 + br i1 %148, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %149 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %150 = bitcast %Tuple* %149 to { i64 }* + %151 = getelementptr inbounds { i64 }, { i64 }* %150, i32 0, i32 0 + store i64 %idxTerm, i64* %151, align 4 + %152 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %indexFunction, %Tuple* %149, %Tuple* %152) + %153 = bitcast %Tuple* %152 to { { { %Array*, %Array* }*, %Array* }* }* + %154 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %153, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %154, align 8 + %155 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %156 = load { %Array*, %Array* }*, { %Array*, %Array* }** %155, align 8 + %157 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %156, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %157, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %158 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %156, i32 0, i32 1 + %coeff = load %Array*, %Array** %158, align 8 
+ call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %159 = bitcast { %Array*, %Array* }* %156 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %159, i32 1) + %160 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %160, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %161 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %161, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %162 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxTermType, i64 0) + %163 = bitcast i8* %162 to i64* + %termType = load i64, i64* %163, align 4 + %ops = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner__VQE__MeasurementOperators__body(i64 %nQubits, %Array* %idxFermions, i64 %termType) + %164 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ops) + %165 = sub i64 %164, 1 + br label %header__12 + +exiting__11: ; preds = %exit__15 + %166 = add i64 %idxTerm, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + %167 = load double, double* %energy, align 8 + %168 = fadd double %167, %energyOffset + %169 = sub i64 %3, 1 + br label %header__16 + +header__12: ; preds = %exiting__12, %body__11 + %170 = phi i64 [ 0, %body__11 ], [ %175, %exiting__12 ] + %171 = icmp sle i64 %170, %165 + br i1 %171, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %172 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %170) + %173 = bitcast i8* %172 to %Array** + %174 = load %Array*, %Array** %173, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %174, i32 1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %175 = add i64 %170, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %coeffs = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner__VQE__ExpandedCoefficients__body(%Array* %coeff, i64 %termType) + call void @__quantum__rt__array_update_alias_count(%Array* %coeffs, i32 1) + %176 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %177 = load %Array*, %Array** %63, align 8 + %178 = call i64 @__quantum__rt__array_get_size_1d(%Array* %177) + %179 = sub i64 %178, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %180 = phi i64 [ 0, %exit__12 ], [ %191, %exiting__13 ] + %181 = icmp sle i64 %180, %179 + br i1 %181, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %182 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %177, i64 %180) + %183 = bitcast i8* %182 to { { double, double }*, %Array* }** + %184 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %183, align 8 + %185 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %184, i32 0, i32 0 + %186 = load { double, double }*, { double, double }** %185, align 8 + %187 = bitcast { double, double }* %186 to %Tuple* + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %187, i32 1) + %188 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %184, i32 0, i32 1 + %189 = load %Array*, %Array** %188, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %189, i32 1) + %190 = bitcast { { double, double }*, %Array* }* %184 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %190, i32 1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %191 = add i64 %180, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_reference_count(%Array* %177, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %79, i32 1) + %192 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { i64, %Array* }* }* getelementptr ({ %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* null, i32 1) to i64)) + %193 = bitcast %Tuple* %192 to { %Callable*, { i64, %Array* }* }* + %194 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %193, i32 0, i32 0 + %195 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %193, i32 0, i32 1 + store %Callable* %176, %Callable** %194, align 8 + store { i64, %Array* }* %inputState, { i64, %Array* }** %195, align 8 + %inputStateUnitary = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__41__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__26__FunctionTable, %Tuple* %192) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inputStateUnitary, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inputStateUnitary, i32 1) + %jwTermEnergy = call double @Microsoft__Quantum__Chemistry__JordanWigner__VQE__EstimateTermExpectation__body(%Callable* %inputStateUnitary, %Array* %ops, %Array* %coeffs, i64 %nQubits, i64 %nSamples) + %196 = load double, double* %energy, align 8 + %197 = fadd double %196, %jwTermEnergy + store double %197, double* %energy, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %159, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %161, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + %198 = sub i64 %164, 1 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %199 = phi i64 [ 0, %exit__13 ], [ %204, %exiting__14 ] + %200 = icmp sle i64 %199, %198 + br i1 %200, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %201 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %199) + %202 = bitcast i8* %201 to %Array** + %203 = load %Array*, %Array** %202, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %203, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %204 = add i64 %199, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %coeffs, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inputStateUnitary, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inputStateUnitary, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %149, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %159, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %161, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %152, i32 -1) + %205 = sub i64 %164, 1 + br label %header__15 + +header__15: ; preds = %exiting__15, %exit__14 + %206 = phi i64 [ 0, %exit__14 ], [ %211, %exiting__15 ] + %207 = icmp sle i64 %206, %205 + br i1 %207, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %208 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %206) + %209 = bitcast i8* %208 to %Array** + %210 = load %Array*, %Array** %209, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %210, i32 -1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %211 = add i64 %206, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coeffs, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %inputStateUnitary, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %inputStateUnitary, i32 -1) + br label %exiting__11 + +header__16: ; preds = %exiting__16, %exit__11 + %212 = phi i64 [ 0, %exit__11 ], [ %222, %exiting__16 ] + %213 = icmp sle i64 %212, %169 + br i1 %213, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %212) + %215 = bitcast i8* %214 to { %Array*, %Array* }** + %216 = load { %Array*, %Array* }*, { %Array*, %Array* }** %215, align 8 + %217 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %216, i32 0, i32 0 + %218 = load %Array*, %Array** %217, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %218, i32 -1) + %219 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %216, i32 0, i32 1 + %220 = load %Array*, %Array** %219, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %220, i32 -1) + %221 = bitcast { %Array*, %Array* }* %216 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %221, i32 -1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %222 = add i64 %212, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %223 = sub i64 %18, 1 + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %224 = phi i64 [ 0, %exit__16 ], [ %234, %exiting__17 ] + %225 = icmp sle i64 %224, %223 + br i1 %225, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %226 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %224) + %227 = bitcast i8* %226 to { %Array*, %Array* }** + %228 = load { %Array*, %Array* }*, { %Array*, %Array* }** %227, align 8 + 
%229 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %228, i32 0, i32 0 + %230 = load %Array*, %Array** %229, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %230, i32 -1) + %231 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %228, i32 0, i32 1 + %232 = load %Array*, %Array** %231, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %232, i32 -1) + %233 = bitcast { %Array*, %Array* }* %228 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %233, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %234 = add i64 %224, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %235 = sub i64 %33, 1 + br label %header__18 + +header__18: ; preds = %exiting__18, %exit__17 + %236 = phi i64 [ 0, %exit__17 ], [ %246, %exiting__18 ] + %237 = icmp sle i64 %236, %235 + br i1 %237, label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %238 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %236) + %239 = bitcast i8* %238 to { %Array*, %Array* }** + %240 = load { %Array*, %Array* }*, { %Array*, %Array* }** %239, align 8 + %241 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %240, i32 0, i32 0 + %242 = load %Array*, %Array** %241, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %242, i32 -1) + %243 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %240, i32 0, i32 1 + %244 = load %Array*, %Array** %243, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %244, i32 -1) + %245 = bitcast { %Array*, %Array* }* %240 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %245, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = %body__18 + %246 = add i64 %236, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %247 = sub i64 %48, 1 + br label %header__19 + +header__19: ; preds = %exiting__19, %exit__18 + %248 = phi i64 [ 0, %exit__18 ], [ %258, %exiting__19 ] + %249 = icmp sle i64 %248, %247 + br i1 %249, label %body__19, label %exit__19 + +body__19: ; preds = %header__19 + %250 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %248) + %251 = bitcast i8* %250 to { %Array*, %Array* }** + %252 = load { %Array*, %Array* }*, { %Array*, %Array* }** %251, align 8 + %253 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %252, i32 0, i32 0 + %254 = load %Array*, %Array** %253, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %254, i32 -1) + %255 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %252, i32 0, i32 1 + %256 = load %Array*, %Array** %255, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %256, i32 -1) + %257 = bitcast { %Array*, %Array* }* %252 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %257, i32 -1) + br label %exiting__19 + +exiting__19: ; preds = %body__19 + %258 = add i64 %248, 1 + br label %header__19 + +exit__19: ; preds = %header__19 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %259 = load %Array*, %Array** %63, align 8 + %260 = call i64 @__quantum__rt__array_get_size_1d(%Array* %259) + %261 = sub i64 %260, 1 + br label %header__20 + +header__20: ; preds = 
%exiting__20, %exit__19 + %262 = phi i64 [ 0, %exit__19 ], [ %273, %exiting__20 ] + %263 = icmp sle i64 %262, %261 + br i1 %263, label %body__20, label %exit__20 + +body__20: ; preds = %header__20 + %264 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %259, i64 %262) + %265 = bitcast i8* %264 to { { double, double }*, %Array* }** + %266 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %265, align 8 + %267 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %266, i32 0, i32 0 + %268 = load { double, double }*, { double, double }** %267, align 8 + %269 = bitcast { double, double }* %268 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %269, i32 -1) + %270 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %266, i32 0, i32 1 + %271 = load %Array*, %Array** %270, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %271, i32 -1) + %272 = bitcast { { double, double }*, %Array* }* %266 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %272, i32 -1) + br label %exiting__20 + +exiting__20: ; preds = %body__20 + %273 = add i64 %262, 1 + br label %header__20 + +exit__20: ; preds = %header__20 + call void @__quantum__rt__array_update_alias_count(%Array* %259, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 -1) + %274 = sub i64 %3, 1 + br label %header__21 + +header__21: ; preds = %exiting__21, %exit__20 + %275 = phi i64 [ 0, %exit__20 ], [ %285, %exiting__21 ] + %276 = icmp sle i64 %275, %274 + br i1 %276, label %body__21, label %exit__21 + +body__21: ; preds = %header__21 + %277 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 %275) + %278 = bitcast i8* %277 to { %Array*, %Array* }** + %279 = load { %Array*, %Array* }*, { %Array*, %Array* }** %278, align 8 + %280 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %279, i32 0, i32 0 + %281 = load %Array*, %Array** %280, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %281, i32 -1) + %282 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %279, i32 0, i32 1 + %283 = load %Array*, %Array** %282, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %283, i32 -1) + %284 = bitcast { %Array*, %Array* }* %279 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %284, i32 -1) + br label %exiting__21 + +exiting__21: ; preds = %body__21 + %285 = add i64 %275, 1 + br label %header__21 + +exit__21: ; preds = %header__21 + call void @__quantum__rt__array_update_alias_count(%Array* %2, i32 -1) + %286 = sub i64 %18, 1 + br label %header__22 + +header__22: ; preds = %exiting__22, %exit__21 + %287 = phi i64 [ 0, %exit__21 ], [ %297, %exiting__22 ] + %288 = icmp sle i64 %287, %286 + br i1 %288, label %body__22, label %exit__22 + +body__22: ; preds = %header__22 + %289 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %287) + %290 = bitcast i8* %289 to { %Array*, %Array* }** + %291 = load { %Array*, %Array* }*, { %Array*, %Array* }** %290, align 8 + %292 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %291, i32 0, i32 0 + %293 = load %Array*, %Array** %292, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %293, i32 -1) + %294 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %291, i32 0, i32 1 + %295 
= load %Array*, %Array** %294, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %295, i32 -1) + %296 = bitcast { %Array*, %Array* }* %291 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %296, i32 -1) + br label %exiting__22 + +exiting__22: ; preds = %body__22 + %297 = add i64 %287, 1 + br label %header__22 + +exit__22: ; preds = %header__22 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %298 = sub i64 %33, 1 + br label %header__23 + +header__23: ; preds = %exiting__23, %exit__22 + %299 = phi i64 [ 0, %exit__22 ], [ %309, %exiting__23 ] + %300 = icmp sle i64 %299, %298 + br i1 %300, label %body__23, label %exit__23 + +body__23: ; preds = %header__23 + %301 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %299) + %302 = bitcast i8* %301 to { %Array*, %Array* }** + %303 = load { %Array*, %Array* }*, { %Array*, %Array* }** %302, align 8 + %304 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %303, i32 0, i32 0 + %305 = load %Array*, %Array** %304, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %305, i32 -1) + %306 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %303, i32 0, i32 1 + %307 = load %Array*, %Array** %306, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %307, i32 -1) + %308 = bitcast { %Array*, %Array* }* %303 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %308, i32 -1) + br label %exiting__23 + +exiting__23: ; preds = %body__23 + %309 = add i64 %299, 1 + br label %header__23 + +exit__23: ; preds = %header__23 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %310 = sub i64 %48, 1 + br label %header__24 + +header__24: ; preds = %exiting__24, %exit__23 + %311 = phi i64 [ 0, %exit__23 ], [ %321, %exiting__24 ] + %312 = icmp sle i64 %311, %310 + br i1 %312, label %body__24, label %exit__24 + +body__24: ; preds = %header__24 + %313 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %311) + %314 = bitcast i8* %313 to { %Array*, %Array* }** + %315 = load { %Array*, %Array* }*, { %Array*, %Array* }** %314, align 8 + %316 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %315, i32 0, i32 0 + %317 = load %Array*, %Array** %316, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %317, i32 -1) + %318 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %315, i32 0, i32 1 + %319 = load %Array*, %Array** %318, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %319, i32 -1) + %320 = bitcast { %Array*, %Array* }* %315 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %320, i32 -1) + br label %exiting__24 + +exiting__24: ; preds = %body__24 + %321 = add i64 %311, 1 + br label %header__24 + +exit__24: ; preds = %header__24 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %61, i32 -1) + %322 = sub i64 %260, 1 + br label %header__25 + +header__25: ; preds = %exiting__25, %exit__24 + %323 = phi i64 [ 0, %exit__24 ], [ %334, %exiting__25 ] + %324 = icmp sle i64 %323, %322 + br i1 %324, label %body__25, label %exit__25 + +body__25: ; preds = %header__25 + %325 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %259, i64 %323) + %326 = bitcast i8* %325 to { { double, double }*, %Array* }** + %327 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %326, 
align 8 + %328 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %327, i32 0, i32 0 + %329 = load { double, double }*, { double, double }** %328, align 8 + %330 = bitcast { double, double }* %329 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %330, i32 -1) + %331 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %327, i32 0, i32 1 + %332 = load %Array*, %Array** %331, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %332, i32 -1) + %333 = bitcast { { double, double }*, %Array* }* %327 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %333, i32 -1) + br label %exiting__25 + +exiting__25: ; preds = %body__25 + %334 = add i64 %323, 1 + br label %header__25 + +exit__25: ; preds = %header__25 + call void @__quantum__rt__array_update_alias_count(%Array* %259, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %indexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %indexFunction, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %indexFunction, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %indexFunction, i32 -1) + %335 = bitcast { i64, %Callable* }* %144 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %335, i32 -1) + ret double %168 +} + +define internal double @Microsoft__Quantum__Characterization__EstimateFrequency__body(%Callable* %preparation, %Callable* %measurement, i64 %nQubits, i64 %nMeasurements) { +entry: + %nUp = alloca i64, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %measurement, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %measurement, i32 1) + store i64 0, i64* %nUp, align 4 + %0 = sub i64 %nMeasurements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxMeasurement = phi i64 [ 0, %entry ], [ %16, %exiting__1 ] + %1 = icmp sle i64 %idxMeasurement, %0 + br i1 %1, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %register = call %Array* @__quantum__rt__qubit_allocate_array(i64 %nQubits) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %3 = bitcast %Tuple* %2 to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + store %Array* %register, %Array** %4, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %preparation, %Tuple* %2, %Tuple* null) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Array* }* + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + store %Array* %register, %Array** %7, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Result* }* getelementptr ({ %Result* }, { %Result* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %measurement, %Tuple* %5, %Tuple* %8) + %9 = bitcast %Tuple* %8 to { %Result* }* + %10 = getelementptr 
inbounds { %Result* }, { %Result* }* %9, i32 0, i32 0 + %result = load %Result*, %Result** %10, align 8 + %11 = call %Result* @__quantum__rt__result_get_zero() + %12 = call i1 @__quantum__rt__result_equal(%Result* %result, %Result* %11) + br i1 %12, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %13 = load i64, i64* %nUp, align 4 + %14 = add i64 %13, 1 + store i64 %14, i64* %nUp, align 4 + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Reset__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @Microsoft__Quantum__Canon___8378d03f253249b0a8b7584c7ad801ff_ApplyToEach__body(%Callable* %15, %Array* %register) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %register) + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %16 = add i64 %idxMeasurement, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %17 = load i64, i64* %nUp, align 4 + %18 = sitofp i64 %17 to double + %19 = sitofp i64 %nMeasurements to double + %20 = fdiv double %18, %19 + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %measurement, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %measurement, i32 -1) + ret double %20 +} + +declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) + +declare %Result* @__quantum__rt__result_get_zero() + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +define internal void @Microsoft__Quantum__Canon___8378d03f253249b0a8b7584c7ad801ff_ApplyToEach__body(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call %Range @Microsoft__Quantum__Arrays___cf7bb862dc544cd083b9ebf7b65b7b76_IndexRange__body(%Array* %register) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %4 = icmp sgt i64 %2, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxQubit = phi i64 [ %1, %preheader__1 ], [ %14, %exiting__1 ] + %5 = icmp sle i64 %idxQubit, %3 + %6 
= icmp sge i64 %idxQubit, %3 + %7 = select i1 %4, i1 %5, i1 %6 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Qubit* }* + %13 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %12, i32 0, i32 0 + store %Qubit* %10, %Qubit** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %singleElementOperation, %Tuple* %11, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %14 = add i64 %idxQubit, %2 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Reset__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %2) + ret void +} + +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) + +declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) + +define internal void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %qubit) { +entry: + %0 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) + %1 = call %Result* @__quantum__rt__result_get_one() + %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %qubit) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +define internal double @Microsoft__Quantum__Characterization__EstimateFrequencyA__body(%Callable* %preparation, %Callable* %measurement, i64 %nQubits, i64 %nMeasurements) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %measurement, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %measurement, i32 1) + %0 = call double @Microsoft__Quantum__Characterization__EstimateFrequency__body(%Callable* %preparation, %Callable* %measurement, i64 %nQubits, i64 %nMeasurements) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %measurement, i32 -1) 
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %measurement, i32 -1) + ret double %0 +} + +define internal i1 @Microsoft__Quantum__Canon____QsRef0__AnyOutsideToleranceCP____body(double %tolerance, %Array* %coefficients) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %10) + %13 = bitcast i8* %12 to { double, double }** + %coefficient = load { double, double }*, { double, double }** %13, align 8 + %14 = bitcast { double, double }* %coefficient to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %coefficient) + %16 = fcmp ogt double %15, %tolerance + br i1 %16, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__2 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + %17 = sub i64 %0, 1 + br label %header__3 + +continue__1: ; preds = %body__2 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %continue__1 + %18 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %19 = sub i64 %0, 1 + br label %header__4 + +header__3: ; preds = %exiting__3, %then0__1 + %20 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__3 ] + %21 = icmp sle i64 %20, %17 + br i1 %21, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %20) + %23 = bitcast i8* %22 to { double, double }** + %24 = load { double, double }*, { double, double }** %23, align 8 + %25 = bitcast { double, double }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %26 = add i64 %20, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 true + +header__4: ; preds = %exiting__4, %exit__2 + %27 = phi i64 [ 0, %exit__2 ], [ %33, %exiting__4 ] + %28 = icmp sle i64 %27, %19 + br i1 %28, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %27) + %30 = bitcast i8* %29 to { double, double }** + %31 = load { double, double }*, { double, double }** %30, align 8 + %32 = bitcast { double, double }* 
%31 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %32, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %33 = add i64 %27, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 false +} + +define internal double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 0 + %2 = load double, double* %1, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %2 +} + +define internal i1 @Microsoft__Quantum__Canon____QsRef0__AnyOutsideToleranceD____body(double %tolerance, %Array* %coefficients) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to double* + %coefficient = load double, double* %5, align 8 + %6 = call double @Microsoft__Quantum__Math__AbsD__body(double %coefficient) + %7 = fcmp oge double %6, %tolerance + br i1 %7, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 true + +continue__1: ; preds = %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 false +} + +define internal double @Microsoft__Quantum__Math__AbsD__body(double %a) { +entry: + %0 = fcmp olt double %a, 0.000000e+00 + br i1 %0, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %1 = fneg double %a + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %2 = phi double [ %1, %condTrue__1 ], [ %a, %condFalse__1 ] + ret double %2 +} + +define internal { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef0__MultiplexZCoefficients____body(%Array* %coefficients) { +entry: + %coefficients1 = alloca %Array*, align 8 + %coefficients0 = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %newCoefficientsLength = sdiv i64 %0, 2 + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %newCoefficientsLength) + %2 = sub i64 %newCoefficientsLength, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %3) + %6 = bitcast i8* %5 to double* + store double 0.000000e+00, double* %6, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %3, 1 + br label 
%header__1 + +exit__1: ; preds = %header__1 + store %Array* %1, %Array** %coefficients0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %newCoefficientsLength) + %9 = sub i64 %newCoefficientsLength, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %14, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 %10) + %13 = bitcast i8* %12 to double* + store double 0.000000e+00, double* %13, align 8 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %14 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + store %Array* %8, %Array** %coefficients1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %15 = sub i64 %newCoefficientsLength, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxCoeff = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %16 = icmp sle i64 %idxCoeff, %15 + br i1 %16, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %17 = load %Array*, %Array** %coefficients0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %18 = call %Array* @__quantum__rt__array_copy(%Array* %17, i1 false) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %idxCoeff) + %20 = bitcast i8* %19 to double* + %21 = load double, double* %20, align 8 + %22 = add i64 %idxCoeff, %newCoefficientsLength + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %22) + %24 = bitcast i8* %23 to double* + %25 = load double, double* %24, align 8 + %26 = fadd double %21, %25 + %27 = fmul double 5.000000e-01, %26 + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %idxCoeff) + %29 = bitcast i8* %28 to double* + store double %27, double* %29, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + store %Array* %18, %Array** %coefficients0, align 8 + %30 = load %Array*, %Array** %coefficients1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %30, i32 -1) + %31 = call %Array* @__quantum__rt__array_copy(%Array* %30, i1 false) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %idxCoeff) + %33 = bitcast i8* %32 to double* + %34 = load double, double* %33, align 8 + %35 = add i64 %idxCoeff, %newCoefficientsLength + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %35) + %37 = bitcast i8* %36 to double* + %38 = load double, double* %37, align 8 + %39 = fsub double %34, %38 + %40 = fmul double 5.000000e-01, %39 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %31, i64 %idxCoeff) + %42 = bitcast i8* %41 to double* + %43 = load double, double* %42, align 8 + store double %40, double* %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %31, i32 1) + store %Array* %31, %Array** %coefficients1, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %30, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %idxCoeff, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %45 = load %Array*, %Array** %coefficients0, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %45, i32 1) + %46 = load %Array*, %Array** %coefficients1, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %46, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { %Array*, %Array* }* + %49 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %48, i32 0, i32 1 + store %Array* %45, %Array** %49, align 8 + store %Array* %46, %Array** %50, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %45, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %46, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %45, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %46, i32 -1) + ret { %Array*, %Array* }* %48 +} + +declare %Array* @__quantum__rt__array_copy(%Array*, i1) + +define internal double @Microsoft__Quantum__Canon____QsRef0__TrotterStepSize____body(i64 %order) { +entry: + %0 = sitofp i64 %order to double + %1 = fsub double %0, 1.000000e+00 + %2 = fdiv double 1.000000e+00, %1 + %3 = call double @Microsoft__Quantum__Math__PowD__body(double 4.000000e+00, double %2) + %4 = fsub double 4.000000e+00, %3 + %5 = fdiv double 1.000000e+00, %4 + ret double %5 +} + +define internal double @Microsoft__Quantum__Math__PowD__body(double %x, double %y) { +entry: + %0 = call double @llvm.pow.f64(double %x, double %y) + ret double %0 +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__body(i2 %pauli, %Qubit* %target) { +entry: + %0 = icmp eq i2 %pauli, 1 + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +test1__1: ; preds = %entry + %1 = icmp eq i2 %pauli, -1 + br i1 %1, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__qis__y__body(%Qubit* %target) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %2 = icmp eq i2 %pauli, -2 + br i1 %2, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__qis__z__body(%Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + ret void +} + +declare void @__quantum__qis__x__body(%Qubit*) + +declare void @__quantum__qis__y__body(%Qubit*) + +declare void @__quantum__qis__z__body(%Qubit*) + +define internal void @Microsoft__Quantum__Canon__ApplyP__adj(i2 %pauli, %Qubit* %target) { +entry: + %0 = icmp eq i2 %pauli, 1 + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +test1__1: ; preds = %entry + %1 = icmp eq i2 %pauli, -1 + br i1 %1, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__qis__y__body(%Qubit* %target) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %2 = icmp eq i2 %pauli, -2 + br i1 %2, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__qis__z__body(%Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + ret void +} + +define internal void 
@Microsoft__Quantum__Canon__ApplyP__ctl(%Array* %__controlQubits__, { i2, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = icmp eq i2 %pauli, 1 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %4 = icmp eq i2 %pauli, -1 + br i1 %4, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %5 = icmp eq i2 %pauli, -2 + br i1 %5, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) + +declare void @__quantum__qis__y__ctl(%Array*, %Qubit*) + +declare void @__quantum__qis__z__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Canon__ApplyP__ctladj(%Array* %__controlQubits__, { i2, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = icmp eq i2 %pauli, 1 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %4 = icmp eq i2 %pauli, -1 + br i1 %4, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %5 = icmp eq i2 %pauli, -2 + br i1 %5, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* 
%__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 %pauli, i1 %bitApply, %Array* %bits, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %nBits = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %0 = call %Array* @Microsoft__Quantum__Arrays___ad4d9934ae784786889d57b350a8a8f2_Zipped__body(%Array* %bits, %Array* %qubits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %3) + %6 = bitcast i8* %5 to { i1, %Qubit* }** + %7 = load { i1, %Qubit* }*, { i1, %Qubit* }** %6, align 8 + %8 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %7, i32 0, i32 0 + %bit = load i1, i1* %8, align 1 + %9 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %7, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %9, align 8 + %10 = icmp eq i1 %bit, %bitApply + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + call void @Microsoft__Quantum__Canon__ApplyP__body(i2 %pauli, %Qubit* %qubit) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %11 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %12 = sub i64 %1, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %13) + %16 = bitcast i8* %15 to { i1, %Qubit* }** + %17 = load { i1, %Qubit* }*, { i1, %Qubit* }** %16, align 8 + %18 = bitcast { i1, %Qubit* }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___ad4d9934ae784786889d57b350a8a8f2_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %2 = icmp slt i64 %0, %1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label 
%condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = icmp eq i64 %nElements, 0 + br i1 %3, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %4 + +continue__1: ; preds = %condContinue__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %6 = bitcast i8* %5 to i1* + %7 = load i1, i1* %6, align 1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, %Qubit* }* getelementptr ({ i1, %Qubit* }, { i1, %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i1, %Qubit* }* + %13 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %12, i32 0, i32 1 + store i1 %7, i1* %13, align 1 + store %Qubit* %10, %Qubit** %14, align 8 + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %16 = sub i64 %nElements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %17 = phi i64 [ 0, %continue__1 ], [ %21, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %17) + %20 = bitcast i8* %19 to { i1, %Qubit* }** + store { i1, %Qubit* }* %12, { i1, %Qubit* }** %20, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %15, %Array** %output, align 8 + %22 = sub i64 %nElements, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %23) + %26 = bitcast i8* %25 to { i1, %Qubit* }** + %27 = load { i1, %Qubit* }*, { i1, %Qubit* }** %26, align 8 + %28 = bitcast { i1, %Qubit* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %30 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %48, %exiting__3 ] + %31 = icmp sle i64 %idxElement, %30 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %32 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %35 = bitcast i8* %34 to i1* + %36 = load i1, i1* %35, align 1 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 
%idxElement) + %38 = bitcast i8* %37 to %Qubit** + %39 = load %Qubit*, %Qubit** %38, align 8 + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, %Qubit* }* getelementptr ({ i1, %Qubit* }, { i1, %Qubit* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i1, %Qubit* }* + %42 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %41, i32 0, i32 1 + store i1 %36, i1* %42, align 1 + store %Qubit* %39, %Qubit** %43, align 8 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxElement) + %45 = bitcast i8* %44 to { i1, %Qubit* }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %46 = load { i1, %Qubit* }*, { i1, %Qubit* }** %45, align 8 + %47 = bitcast { i1, %Qubit* }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { i1, %Qubit* }* %41, { i1, %Qubit* }** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { i1, %Qubit* }** + %56 = load { i1, %Qubit* }*, { i1, %Qubit* }** %55, align 8 + %57 = bitcast { i1, %Qubit* }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret %Array* %49 +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 %pauli, i1 %bitApply, %Array* %bits, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %__qsVar0__nBits__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %0 = call %Array* @Microsoft__Quantum__Arrays___ad4d9934ae784786889d57b350a8a8f2_Zipped__body(%Array* %bits, %Array* %qubits) + %1 = call %Array* @Microsoft__Quantum__Arrays___ad4d9934ae784786889d57b350a8a8f2_Zipped__body(%Array* %bits, %Array* %qubits) + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + %4 = insertvalue %Range zeroinitializer, i64 %3, 0 + %5 = insertvalue %Range %4, i64 -1, 1 + %6 = insertvalue %Range %5, i64 0, 2 + %7 = call %Array* @__quantum__rt__array_slice_1d(%Array* %0, %Range %6, i1 true) + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %7) + %9 = sub i64 %8, 
1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %10 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %7, i64 %10) + %13 = bitcast i8* %12 to { i1, %Qubit* }** + %14 = load { i1, %Qubit* }*, { i1, %Qubit* }** %13, align 8 + %15 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %14, i32 0, i32 0 + %__qsVar1__bit__ = load i1, i1* %15, align 1 + %16 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %14, i32 0, i32 1 + %__qsVar2__qubit__ = load %Qubit*, %Qubit** %16, align 8 + %17 = icmp eq i1 %__qsVar1__bit__, %bitApply + br i1 %17, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + call void @Microsoft__Quantum__Canon__ApplyP__adj(i2 %pauli, %Qubit* %__qsVar2__qubit__) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %18 = add i64 %10, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %19 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %20 = sub i64 %19, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %21) + %24 = bitcast i8* %23 to { i1, %Qubit* }** + %25 = load { i1, %Qubit* }*, { i1, %Qubit* }** %24, align 8 + %26 = bitcast { i1, %Qubit* }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + %28 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %29) + %32 = bitcast i8* %31 to { i1, %Qubit* }** + %33 = load { i1, %Qubit* }*, { i1, %Qubit* }** %32, align 8 + %34 = bitcast { i1, %Qubit* }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %7, i32 -1) + ret void +} + +declare %Array* @__quantum__rt__array_slice_1d(%Array*, %Range, i1) + +define internal void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__ctl(%Array* %__controlQubits__, { i2, i1, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 1 + %bitApply = load 
i1, i1* %2, align 1 + %3 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 2 + %bits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %4 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 3 + %qubits = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %nBits = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %5 = call %Array* @Microsoft__Quantum__Arrays___ad4d9934ae784786889d57b350a8a8f2_Zipped__body(%Array* %bits, %Array* %qubits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5) + %7 = sub i64 %6, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %8 = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] + %9 = icmp sle i64 %8, %7 + br i1 %9, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %5, i64 %8) + %11 = bitcast i8* %10 to { i1, %Qubit* }** + %12 = load { i1, %Qubit* }*, { i1, %Qubit* }** %11, align 8 + %13 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %12, i32 0, i32 0 + %bit = load i1, i1* %13, align 1 + %14 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %12, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %14, align 8 + %15 = icmp eq i1 %bit, %bitApply + br i1 %15, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, %Qubit* }* getelementptr ({ i2, %Qubit* }, { i2, %Qubit* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { i2, %Qubit* }* + %18 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %17, i32 0, i32 1 + store i2 %pauli, i2* %18, align 1 + store %Qubit* %qubit, %Qubit** %19, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__ctl(%Array* %__controlQubits__, { i2, %Qubit* }* %17) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %20 = add i64 %8, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %21 = sub i64 %6, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %22 = phi i64 [ 0, %exit__1 ], [ %28, %exiting__2 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %5, i64 %22) + %25 = bitcast i8* %24 to { i1, %Qubit* }** + %26 = load { i1, %Qubit* }*, { i1, %Qubit* }** %25, align 8 + %27 = bitcast { i1, %Qubit* }* %26 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %28 = add i64 %22, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__ctladj(%Array* %__controlQubits__, { i2, i1, %Array*, %Array* }* %0) { +entry: + 
call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 1 + %bitApply = load i1, i1* %2, align 1 + %3 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 2 + %bits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %4 = getelementptr inbounds { i2, i1, %Array*, %Array* }, { i2, i1, %Array*, %Array* }* %0, i32 0, i32 3 + %qubits = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %__qsVar0__nBits__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %5 = call %Array* @Microsoft__Quantum__Arrays___ad4d9934ae784786889d57b350a8a8f2_Zipped__body(%Array* %bits, %Array* %qubits) + %6 = call %Array* @Microsoft__Quantum__Arrays___ad4d9934ae784786889d57b350a8a8f2_Zipped__body(%Array* %bits, %Array* %qubits) + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + %9 = insertvalue %Range zeroinitializer, i64 %8, 0 + %10 = insertvalue %Range %9, i64 -1, 1 + %11 = insertvalue %Range %10, i64 0, 2 + %12 = call %Array* @__quantum__rt__array_slice_1d(%Array* %5, %Range %11, i1 true) + %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %12) + %14 = sub i64 %13, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %15 = phi i64 [ 0, %entry ], [ %27, %exiting__1 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %15) + %18 = bitcast i8* %17 to { i1, %Qubit* }** + %19 = load { i1, %Qubit* }*, { i1, %Qubit* }** %18, align 8 + %20 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %19, i32 0, i32 0 + %__qsVar1__bit__ = load i1, i1* %20, align 1 + %21 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %19, i32 0, i32 1 + %__qsVar2__qubit__ = load %Qubit*, %Qubit** %21, align 8 + %22 = icmp eq i1 %__qsVar1__bit__, %bitApply + br i1 %22, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, %Qubit* }* getelementptr ({ i2, %Qubit* }, { i2, %Qubit* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { i2, %Qubit* }* + %25 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %24, i32 0, i32 1 + store i2 %pauli, i2* %25, align 1 + store %Qubit* %__qsVar2__qubit__, %Qubit** %26, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__ctladj(%Array* %__controlQubits__, { i2, %Qubit* }* %24) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %27 = add i64 %15, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %28 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5) + %29 = sub i64 %28, 1 + 
br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %30 = phi i64 [ 0, %exit__1 ], [ %36, %exiting__2 ] + %31 = icmp sle i64 %30, %29 + br i1 %31, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %5, i64 %30) + %33 = bitcast i8* %32 to { i1, %Qubit* }** + %34 = load { i1, %Qubit* }*, { i1, %Qubit* }** %33, align 8 + %35 = bitcast { i1, %Qubit* }* %34 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %36 = add i64 %30, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + %37 = sub i64 %7, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %38 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %38) + %41 = bitcast i8* %40 to { i1, %Qubit* }** + %42 = load { i1, %Qubit* }*, { i1, %Qubit* }** %41, align 8 + %43 = bitcast { i1, %Qubit* }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %38, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__body(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %qubits__1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + %1 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = call i1 @Microsoft__Quantum__Arrays___1d3ac85f29c5411cb0d85cee37bd798d_IsEmpty__body(%Array* %qubits__1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @0, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__fail(%String* %3) + unreachable + +continue__1: ; preds = %entry + %4 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1) + %5 = trunc i64 %4 to i32 + %6 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %5) + %7 = fptosi double %6 to i64 + %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %7, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1) + %8 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef0__MultiplexZCoefficients____body(%Array* %coefficientsPadded) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %coefficients0 = load %Array*, %Array** %9, 
align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + %coefficients1 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1) + %11 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %qubits__1) + %12 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %11) + %13 = call %Qubit* @Microsoft__Quantum__Arrays___27ad8d9739d14310a78018b7367de3ec_Tail__body(%Array* %qubits__1) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients1, { %Array* }* %12, %Qubit* %13) + %14 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded) + %15 = icmp eq i64 %14, 2 + br i1 %15, label %then0__2, label %else__1 + +then0__2: ; preds = %continue__1 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0) + %17 = bitcast i8* %16 to double* + %18 = load double, double* %17, align 8 + %19 = call double @Microsoft__Quantum__Math__AbsD__body(double %18) + %20 = fcmp ogt double %19, %tolerance + br i1 %20, label %then0__3, label %continue__3 + +then0__3: ; preds = %then0__2 + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %22 = bitcast i8* %21 to i2* + store i2 0, i2* %22, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0) + %24 = bitcast i8* %23 to double* + %25 = load double, double* %24, align 8 + %theta = fmul double 1.000000e+00, %25 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %theta, %Array* %qubits__1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %then0__2 + br label %continue__2 + +else__1: ; preds = %continue__1 + %26 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %qubits__1) + %27 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %26) + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__body(double %tolerance, %Array* %coefficients0, { %Array* }* %27) + %28 = getelementptr inbounds { %Array* }, { %Array* }* %27, i32 0, i32 0 + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %26, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1) + %30 = bitcast { %Array* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %continue__3 + %31 = getelementptr inbounds { %Array* }, { %Array* }* %12, i32 0, i32 0 + %32 = load %Array*, %Array** %31, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + 
call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1) + %33 = bitcast { %Array*, %Array* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + %34 = bitcast { %Array* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + ret void +} + +define internal i1 @Microsoft__Quantum__Arrays___1d3ac85f29c5411cb0d85cee37bd798d_IsEmpty__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = icmp eq i64 %0, 0 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret i1 %1 +} + +declare %String* @__quantum__rt__string_create(i8*) + +declare void @__quantum__rt__fail(%String*) + +define internal %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %nElementsTotal, double %defaultElement, %Array* %inputArray) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 1) + %nElementsInitial = call i64 @__quantum__rt__array_get_size_1d(%Array* %inputArray) + %nAbsElementsTotal = call i64 @Microsoft__Quantum__Math__AbsI__body(i64 %nElementsTotal) + %0 = icmp sge i64 %nAbsElementsTotal, %nElementsInitial + %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([71 x i8], [71 x i8]* @10, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %0, i1 true, %String* %1) + %nElementsPad = sub i64 %nAbsElementsTotal, %nElementsInitial + %padArray = call %Array* @Microsoft__Quantum__Arrays___8db1b1d8b63441b583b7338681e3b5b2_ConstantArray__body(i64 %nElementsPad, double %defaultElement) + call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 1) + %2 = icmp sge i64 %nElementsTotal, 0 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %3 = call %Array* @__quantum__rt__array_concatenate(%Array* %padArray, %Array* %inputArray) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + br label %condContinue__1 + +condFalse__1: ; preds = %entry + %4 = call %Array* @__quantum__rt__array_concatenate(%Array* %inputArray, %Array* %padArray) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %5 = phi %Array* [ %3, %condTrue__1 ], [ %4, %condFalse__1 ] + call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %padArray, i32 -1) + ret %Array* %5 +} + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare double @llvm.powi.f64.i32(double, i32) #0 + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients, { %Array* }* %control, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %4 = trunc i64 %3 to i32 + %5 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %4) + %6 = fptosi double %5 to i64 + %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %6, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1) + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded) + %8 = icmp eq i64 %7, 1 + br i1 %8, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 0) + %10 = bitcast i8* %9 to double* + %11 = load double, double* %10, align 8 + %12 = call double @Microsoft__Quantum__Math__AbsD__body(double %11) + %13 = fcmp ogt double %12, %tolerance + br i1 %13, label %then0__2, label %continue__2 + +then0__2: ; preds = %then0__1 + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %15 = bitcast i8* %14 to i2* + store i2 -2, i2* %15, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 0) + %17 = bitcast i8* %16 to double* + %theta = load double, double* %17, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %19 = bitcast i8* %18 to %Qubit** + store %Qubit* %target, %Qubit** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + br label %continue__2 + +continue__2: ; preds = %then0__2, %then0__1 + br label %continue__1 + +else__1: ; preds = %entry + %20 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef0__MultiplexZCoefficients____body(%Array* %coefficientsPadded) + %21 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 0 + %coefficients0 = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1) + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 1 + 
%coefficients1 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1) + %23 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %1) + %24 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %23) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients0, { %Array* }* %24, %Qubit* %target) + %25 = call i1 @Microsoft__Quantum__Canon____QsRef0__AnyOutsideToleranceD____body(double %tolerance, %Array* %coefficients1) + br i1 %25, label %then0__3, label %continue__3 + +then0__3: ; preds = %else__1 + %26 = call %Qubit* @Microsoft__Quantum__Arrays___27ad8d9739d14310a78018b7367de3ec_Tail__body(%Array* %1) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %26, %Qubit* %target) + %27 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %1) + %28 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %27) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients1, { %Array* }* %28, %Qubit* %target) + %29 = call %Qubit* @Microsoft__Quantum__Arrays___27ad8d9739d14310a78018b7367de3ec_Tail__body(%Array* %1) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %29, %Qubit* %target) + %30 = getelementptr inbounds { %Array* }, { %Array* }* %28, i32 0, i32 0 + %31 = load %Array*, %Array** %30, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 -1) + %32 = bitcast { %Array* }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %else__1 + %33 = getelementptr inbounds { %Array* }, { %Array* }* %24, i32 0, i32 0 + %34 = load %Array*, %Array** %33, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1) + %35 = bitcast { %Array*, %Array* }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + %36 = bitcast { %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %continue__1 + +continue__1: ; preds = %continue__3, %continue__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1) + ret void +} + +define internal { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %__Item1__) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ 
%Array* }, { %Array* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Array* }* + %2 = getelementptr inbounds { %Array* }, { %Array* }* %1, i32 0, i32 0 + store %Array* %__Item1__, %Array** %2, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %__Item1__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__Item1__, i32 -1) + ret { %Array* }* %1 +} + +define internal %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 2 + %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2 + %3 = call %Array* @__quantum__rt__array_slice_1d(%Array* %array, %Range %2, i1 true) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + ret %Array* %3 +} + +define internal %Qubit* @Microsoft__Quantum__Arrays___27ad8d9739d14310a78018b7367de3ec_Tail__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = icmp sgt i64 %0, 0 + %2 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([39 x i8], [39 x i8]* @11, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %1, i1 true, %String* %2) + %3 = sub i64 %0, 1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %3) + %5 = bitcast i8* %4 to %Qubit** + %6 = load %Qubit*, %Qubit** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + ret %Qubit* %6 +} + +declare void @__quantum__qis__exp__body(%Array*, double, %Array*) + +define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__adj(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %qubits__1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + %1 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = call i1 @Microsoft__Quantum__Arrays___1d3ac85f29c5411cb0d85cee37bd798d_IsEmpty__body(%Array* %qubits__1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @0, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__fail(%String* %3) + unreachable + +continue__1: ; preds = %entry + %4 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1) + %5 = trunc i64 %4 to i32 + %6 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %5) + %7 = fptosi double %6 to i64 + %__qsVar0__coefficientsPadded__ = call %Array* 
@Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %7, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1) + %8 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef0__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %__qsVar1__coefficients0__ = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + %__qsVar2__coefficients1__ = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsPadded__) + %12 = icmp eq i64 %11, 2 + br i1 %12, label %then0__2, label %else__1 + +then0__2: ; preds = %continue__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0) + %14 = bitcast i8* %13 to double* + %15 = load double, double* %14, align 8 + %16 = call double @Microsoft__Quantum__Math__AbsD__body(double %15) + %17 = fcmp ogt double %16, %tolerance + br i1 %17, label %then0__3, label %continue__3 + +then0__3: ; preds = %then0__2 + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %19 = bitcast i8* %18 to i2* + store i2 0, i2* %19, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0) + %21 = bitcast i8* %20 to double* + %22 = load double, double* %21, align 8 + %theta = fmul double 1.000000e+00, %22 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %theta, %Array* %qubits__1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %then0__2 + br label %continue__2 + +else__1: ; preds = %continue__1 + %23 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %qubits__1) + %24 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %23) + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__adj(double %tolerance, %Array* %__qsVar1__coefficients0__, { %Array* }* %24) + %25 = getelementptr inbounds { %Array* }, { %Array* }* %24, i32 0, i32 0 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %26, i32 -1) + %27 = bitcast { %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %continue__3 + %28 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %qubits__1) + %29 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %28) + %30 = call 
%Qubit* @Microsoft__Quantum__Arrays___27ad8d9739d14310a78018b7367de3ec_Tail__body(%Array* %qubits__1) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar2__coefficients1__, { %Array* }* %29, %Qubit* %30) + %31 = getelementptr inbounds { %Array* }, { %Array* }* %29, i32 0, i32 0 + %32 = load %Array*, %Array** %31, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1) + %33 = bitcast { %Array*, %Array* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + %34 = bitcast { %Array* }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + ret void +} + +declare void @__quantum__qis__exp__adj(%Array*, double, %Array*) + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %coefficients, { %Array* }* %control, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %4 = trunc i64 %3 to i32 + %5 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %4) + %6 = fptosi double %5 to i64 + %__qsVar0__coefficientsPadded__ = call %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %6, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1) + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsPadded__) + %8 = icmp eq i64 %7, 1 + br i1 %8, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsPadded__, i64 0) + %10 = bitcast i8* %9 to double* + %11 = load double, double* %10, align 8 + %12 = call double @Microsoft__Quantum__Math__AbsD__body(double %11) + %13 = fcmp ogt double %12, %tolerance + br i1 %13, label %then0__2, label %continue__2 + +then0__2: ; preds = %then0__1 + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %15 = bitcast i8* %14 to i2* + store i2 -2, i2* %15, align 1 + call void 
@__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsPadded__, i64 0) + %17 = bitcast i8* %16 to double* + %theta = load double, double* %17, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %19 = bitcast i8* %18 to %Qubit** + store %Qubit* %target, %Qubit** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + br label %continue__2 + +continue__2: ; preds = %then0__2, %then0__1 + br label %continue__1 + +else__1: ; preds = %entry + %20 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef0__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__) + %21 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 0 + %__qsVar1__coefficients0__ = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1) + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 1 + %__qsVar2__coefficients1__ = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1) + %23 = call i1 @Microsoft__Quantum__Canon____QsRef0__AnyOutsideToleranceD____body(double %tolerance, %Array* %__qsVar2__coefficients1__) + br i1 %23, label %then0__3, label %continue__3 + +then0__3: ; preds = %else__1 + %24 = call %Qubit* @Microsoft__Quantum__Arrays___27ad8d9739d14310a78018b7367de3ec_Tail__body(%Array* %1) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %24, %Qubit* %target) + %25 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %1) + %26 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %25) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar2__coefficients1__, { %Array* }* %26, %Qubit* %target) + %27 = call %Qubit* @Microsoft__Quantum__Arrays___27ad8d9739d14310a78018b7367de3ec_Tail__body(%Array* %1) + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %27, %Qubit* %target) + %28 = getelementptr inbounds { %Array* }, { %Array* }* %26, i32 0, i32 0 + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1) + %30 = bitcast { %Array* }* %26 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %else__1 + %31 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %1) + %32 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %31) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar1__coefficients0__, { %Array* }* %32, %Qubit* %target) + %33 = 
getelementptr inbounds { %Array* }, { %Array* }* %32, i32 0, i32 0 + %34 = load %Array*, %Array** %33, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1) + %35 = bitcast { %Array*, %Array* }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + %36 = bitcast { %Array* }* %32 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %continue__1 + +continue__1: ; preds = %continue__3, %continue__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %qubits__1 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + %5 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call i1 @Microsoft__Quantum__Arrays___1d3ac85f29c5411cb0d85cee37bd798d_IsEmpty__body(%Array* %qubits__1) + br i1 %6, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %7 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @0, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__fail(%String* %7) + unreachable + +continue__1: ; preds = %entry + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1) + %9 = trunc i64 %8 to i32 + %10 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %9) + %11 = fptosi double %10 to i64 + 
%coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %11, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1) + %12 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef0__MultiplexZCoefficients____body(%Array* %coefficientsPadded) + %13 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 0 + %coefficients0 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1) + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 1 + %coefficients1 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 1) + %15 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %qubits__1) + %16 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %15) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + %17 = call %Qubit* @Microsoft__Quantum__Arrays___27ad8d9739d14310a78018b7367de3ec_Tail__body(%Array* %qubits__1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, { %Array* }*, %Qubit* }* + %20 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 3 + store double %tolerance, double* %20, align 8 + store %Array* %coefficients1, %Array** %21, align 8 + store { %Array* }* %16, { %Array* }** %22, align 8 + store %Qubit* %17, %Qubit** %23, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }*, %Qubit* }* %19) + %24 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded) + %25 = icmp eq i64 %24, 2 + br i1 %25, label %then0__2, label %else__1 + +then0__2: ; preds = %continue__1 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0) + %27 = bitcast i8* %26 to double* + %28 = load double, double* %27, align 8 + %29 = call double @Microsoft__Quantum__Math__AbsD__body(double %28) + %30 = fcmp ogt double %29, %tolerance + br i1 %30, label %then0__3, label %continue__3 + +then0__3: ; preds = %then0__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %32 = bitcast i8* %31 to i2* + store i2 0, i2* %32, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0) + 
%34 = bitcast i8* %33 to double* + %35 = load double, double* %34, align 8 + %theta = fmul double 1.000000e+00, %35 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { %Array*, double, %Array* }* + %38 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %37, i32 0, i32 1 + %40 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %37, i32 0, i32 2 + store %Array* %paulis, %Array** %38, align 8 + store double %theta, double* %39, align 8 + store %Array* %qubits__1, %Array** %40, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %37) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %then0__2 + br label %continue__2 + +else__1: ; preds = %continue__1 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 1) + %41 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %qubits__1) + %42 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %41) + call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1) + %43 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %44 = bitcast %Tuple* %43 to { double, %Array*, { %Array* }* }* + %45 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %44, i32 0, i32 0 + %46 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %44, i32 0, i32 1 + %47 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %44, i32 0, i32 2 + store double %tolerance, double* %45, align 8 + store %Array* %coefficients0, %Array** %46, align 8 + store { %Array* }* %42, { %Array* }** %47, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %44) + %48 = getelementptr inbounds { %Array* }, { %Array* }* %42, i32 0, i32 0 + %49 = load %Array*, %Array** %48, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + %50 = bitcast { %Array* }* %42 to %Tuple* + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %50, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %continue__3 + %51 = getelementptr inbounds { %Array* }, { %Array* }* %16, i32 0, i32 0 + %52 = load %Array*, %Array** %51, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1) + %53 = bitcast { %Array*, %Array* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %52, i32 -1) + %54 = bitcast { %Array* }* %16 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %54, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl(%Array* %controlRegister, { double, %Array*, { %Array* }*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %control = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %target = load %Qubit*, %Qubit** %7, align 8 + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5) + %9 = add i64 %8, 1 + %10 = trunc i64 %9 to i32 + %11 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %10) + %12 = fptosi double %11 to i64 + %13 = trunc i64 %8 to i32 + %14 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %13) + %15 = fptosi double %14 to i64 + %16 = call %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %15, 
double 0.000000e+00, %Array* %coefficients) + %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %12, double 0.000000e+00, %Array* %16) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1) + %17 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef0__MultiplexZCoefficients____body(%Array* %coefficientsPadded) + %18 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 0 + %coefficients0 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1) + %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 1 + %coefficients1 = load %Array*, %Array** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients0, { %Array* }* %control, %Qubit* %target) + %20 = call i1 @Microsoft__Quantum__Canon____QsRef0__AnyOutsideToleranceD____body(double %tolerance, %Array* %coefficients1) + br i1 %20, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients1, { %Array* }* %control, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1) + %21 = bitcast { %Array*, %Array* }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + ret void +} + +declare void @__quantum__qis__exp__ctl(%Array*, { %Array*, double, %Array* }*) + +define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = 
getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %qubits__1 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + %5 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call i1 @Microsoft__Quantum__Arrays___1d3ac85f29c5411cb0d85cee37bd798d_IsEmpty__body(%Array* %qubits__1) + br i1 %6, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %7 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @0, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__fail(%String* %7) + unreachable + +continue__1: ; preds = %entry + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1) + %9 = trunc i64 %8 to i32 + %10 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %9) + %11 = fptosi double %10 to i64 + %__qsVar0__coefficientsPadded__ = call %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %11, double 0.000000e+00, %Array* %coefficients) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1) + %12 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef0__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__) + %13 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 0 + %__qsVar1__coefficients0__ = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1) + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 1 + %__qsVar2__coefficients1__ = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1) + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsPadded__) + %16 = icmp eq i64 %15, 2 + br i1 %16, label %then0__2, label %else__1 + +then0__2: ; preds = %continue__1 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0) + %18 = bitcast i8* %17 to double* + %19 = load double, double* %18, align 8 + %20 = call double @Microsoft__Quantum__Math__AbsD__body(double %19) + %21 = fcmp ogt double %20, %tolerance + br i1 %21, label %then0__3, label %continue__3 + +then0__3: ; preds = %then0__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %23 = bitcast i8* %22 to i2* + store i2 0, i2* %23, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, 
i32 1) + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0) + %25 = bitcast i8* %24 to double* + %26 = load double, double* %25, align 8 + %theta = fmul double 1.000000e+00, %26 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1) + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, double, %Array* }* + %29 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %28, i32 0, i32 1 + %31 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %28, i32 0, i32 2 + store %Array* %paulis, %Array** %29, align 8 + store double %theta, double* %30, align 8 + store %Array* %qubits__1, %Array** %31, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %28) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + br label %continue__3 + +continue__3: ; preds = %then0__3, %then0__2 + br label %continue__2 + +else__1: ; preds = %continue__1 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 1) + %32 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %qubits__1) + %33 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %32) + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + %34 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %35 = bitcast %Tuple* %34 to { double, %Array*, { %Array* }* }* + %36 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %35, i32 0, i32 0 + %37 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %35, i32 0, i32 1 + %38 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %35, i32 0, i32 2 + store double %tolerance, double* %36, align 8 + store %Array* %__qsVar1__coefficients0__, %Array** %37, align 8 + store { %Array* }* %33, { %Array* }** %38, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %35) + %39 = getelementptr inbounds { %Array* }, { %Array* }* %33, i32 0, i32 0 + %40 = load %Array*, %Array** %39, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1) + 
call void @__quantum__rt__array_update_reference_count(%Array* %40, i32 -1) + %41 = bitcast { %Array* }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %continue__3 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 1) + %42 = call %Array* @Microsoft__Quantum__Arrays___0c5cfe676dd14782a827bb8683a2275b_Most__body(%Array* %qubits__1) + %43 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %42) + call void @__quantum__rt__array_update_reference_count(%Array* %42, i32 -1) + %44 = call %Qubit* @Microsoft__Quantum__Arrays___27ad8d9739d14310a78018b7367de3ec_Tail__body(%Array* %qubits__1) + %45 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %46 = bitcast %Tuple* %45 to { double, %Array*, { %Array* }*, %Qubit* }* + %47 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 0 + %48 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 1 + %49 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 2 + %50 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 3 + store double %tolerance, double* %47, align 8 + store %Array* %__qsVar2__coefficients1__, %Array** %48, align 8 + store { %Array* }* %43, { %Array* }** %49, align 8 + store %Qubit* %44, %Qubit** %50, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }*, %Qubit* }* %46) + %51 = getelementptr inbounds { %Array* }, { %Array* }* %43, i32 0, i32 0 + %52 = load %Array*, %Array** %51, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1) + %53 = bitcast { %Array*, %Array* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %52, i32 -1) + %54 = bitcast { %Array* }* %43 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %54, i32 -1) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + ret void +} + +declare void @__quantum__qis__exp__ctladj(%Array*, { %Array*, double, %Array* }*) + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj(%Array* %controlRegister, { double, %Array*, { %Array* }*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %control = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %target = load %Qubit*, %Qubit** %7, align 8 + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5) + %9 = add i64 %8, 1 + %10 = trunc i64 %9 to i32 + %11 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %10) + %12 = fptosi double %11 to i64 + %13 = trunc i64 %8 to i32 + %14 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %13) + %15 = fptosi double %14 to i64 + %16 = call %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %15, double 0.000000e+00, %Array* %coefficients) + %__qsVar0__coefficientsPadded__ = call %Array* @Microsoft__Quantum__Arrays___43223448b8e945699e2d86c9689d21fa_Padded__body(i64 %12, double 0.000000e+00, %Array* %16) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1) + %17 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef0__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__) + %18 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 0 + %__qsVar1__coefficients0__ = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1) + %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 1 + %__qsVar2__coefficients1__ = load %Array*, %Array** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1) + %20 = call i1 @Microsoft__Quantum__Canon____QsRef0__AnyOutsideToleranceD____body(double %tolerance, %Array* %__qsVar2__coefficients1__) + br i1 %20, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* 
%__qsVar2__coefficients1__, { %Array* }* %control, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar1__coefficients0__, { %Array* }* %control, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1) + %21 = bitcast { %Array*, %Array* }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body(double %tolerance, %Array* %coefficients, i2 %pauli, { %Array* }* %control, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = icmp eq i2 %pauli, -2 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, double, %Array*, { %Array* }* }* + %7 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* 
}* }* %6, i32 0, i32 2 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 3 + store %Callable* %4, %Callable** %7, align 8 + store double %tolerance, double* %8, align 8 + store %Array* %coefficients, %Array** %9, align 8 + store { %Array* }* %control, { %Array* }** %10, align 8 + %op = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Qubit* }* + %13 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %12, i32 0, i32 0 + store %Qubit* %target, %Qubit** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %11, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %14 = icmp eq i2 %pauli, 1 + br i1 %14, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %18 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 3 + %22 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 4 + store %Callable* %15, %Callable** %18, align 8 + store double %tolerance, double* %19, align 8 + store %Array* %coefficients, %Array** %20, align 8 + store i2 -2, i2* %21, align 1 + store { %Array* }* %control, { %Array* }** %22, align 8 + %op__1 = call %Callable* @__quantum__rt__callable_create([4 x void 
(%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %16) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 1) + %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__body(%Callable* %23, %Callable* %op__1, %Qubit* %target) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %24 = icmp eq i2 %pauli, -1 + br i1 %24, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %25 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %28 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 1 + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 2 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 3 + %32 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 4 + store %Callable* %25, %Callable** %28, align 8 + store double %tolerance, double* %29, align 8 + store %Array* %coefficients, %Array** %30, align 8 + store i2 1, i2* %31, align 1 + store { %Array* }* %control, { %Array* }** %32, align 8 + %op__2 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__3__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %26) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 1) + %33 = call %Callable* @__quantum__rt__callable_create([4 x void 
(%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %33) + call void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__body(%Callable* %33, %Callable* %op__2, %Qubit* %target) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %34 = icmp eq i2 %pauli, 0 + br i1 %34, label %then3__1, label %else__1 + +then3__1: ; preds = %test3__1 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__body(double %tolerance, %Array* %coefficients, { %Array* }* %control) + br label %continue__1 + +else__1: ; preds = %test3__1 + %35 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @1, i32 0, i32 0)) + %36 = icmp eq i2 1, %pauli + br i1 %36, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @2, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %38 = icmp eq i2 -1, %pauli + br i1 %38, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %39 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @3, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %40 = icmp eq i2 -2, %pauli + br i1 %40, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @4, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %42 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @5, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %43 = phi %String* [ %41, %condTrue__3 ], [ %42, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %44 = phi %String* [ %39, %condTrue__2 ], [ %43, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %45 = phi %String* [ %37, %condTrue__1 ], [ %44, %condContinue__2 ] + %46 = call %String* @__quantum__rt__string_concatenate(%String* %35, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + %47 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @6, i32 0, i32 0)) + %48 = call %String* @__quantum__rt__string_concatenate(%String* %46, %String* %47) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__fail(%String* %48) + unreachable + +continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { 
%Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, 
%Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* 
}* %0, i32 0, i32 3 + %5 = load double, double* %1, align 8 + %6 = load %Array*, %Array** %2, align 8 + %7 = load { %Array* }*, { %Array* }** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %5, %Array* %6, { %Array* }* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %5 = load double, double* %1, align 8 + %6 = load %Array*, %Array** %2, align 8 + %7 = load { %Array* }*, { %Array* }** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %5, %Array* %6, { %Array* }* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, { %Array* }*, %Qubit* }*, { double, %Array*, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl(%Array* %3, { double, %Array*, { %Array* }*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, { %Array* }*, %Qubit* }*, { double, %Array*, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj(%Array* %3, { double, %Array*, { %Array* }*, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = 
load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 
= load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { 
%Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, 
%Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, 
i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* 
%0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load %Array*, %Array** %2, align 8 + %8 = load i2, i2* %3, align 1 + %9 = load { %Array* }*, { %Array* }** %4, align 8 + %10 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body(double %6, %Array* %7, i2 %8, { %Array* }* %9, %Qubit* %10) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load %Array*, %Array** %2, align 8 + %8 = load i2, i2* %3, align 1 + %9 = load { %Array* }*, { %Array* }** %4, align 8 + %10 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj(double %6, %Array* %7, i2 %8, { %Array* }* %9, %Qubit* %10) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, i2, { %Array* }*, %Qubit* }*, { double, %Array*, i2, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl(%Array* %3, { double, %Array*, i2, { %Array* }*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, 
%Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, i2, { %Array* }*, %Qubit* }*, { double, %Array*, i2, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj(%Array* %3, { double, %Array*, i2, { %Array* }*, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 
%count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__body(%Callable* %outerOperation, %Callable* %innerOperation, %Qubit* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Qubit* }* + %2 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %1, i32 0, i32 0 + store %Qubit* %target, %Qubit** %2, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %outerOperation, %Tuple* %0, %Tuple* null) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Qubit* }* + %5 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %4, i32 0, i32 0 + store %Qubit* %target, %Qubit** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %innerOperation, %Tuple* %3, %Tuple* null) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Qubit* }* + %9 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %8, i32 0, i32 0 + store %Qubit* %target, %Qubit** %9, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %2) + ret void 
+} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Lifted__PartialApplication__3__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, 
align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** 
%24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, 
%Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, 
align 8 + call void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +declare void @__quantum__rt__callable_make_adjoint(%Callable*) + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj(double %tolerance, %Array* %coefficients, i2 %pauli, { %Array* }* %control, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = icmp eq i2 %pauli, -2 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, double, %Array*, { %Array* }* }* + %7 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 2 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 3 + store %Callable* %4, %Callable** %7, align 8 + store double %tolerance, double* %8, align 8 + store %Array* %coefficients, %Array** %9, align 8 + store { %Array* }* %control, { %Array* }** %10, align 8 + %__qsVar0__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__4__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %11) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Qubit* }* + %14 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %13, i32 0, i32 0 + store %Qubit* %target, %Qubit** %14, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, 
%Tuple* %12, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %15 = icmp eq i2 %pauli, 1 + br i1 %15, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %16 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 1 + %21 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 2 + %22 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 3 + %23 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 4 + store %Callable* %16, %Callable** %19, align 8 + store double %tolerance, double* %20, align 8 + store %Array* %coefficients, %Array** %21, align 8 + store i2 -2, i2* %22, align 1 + store { %Array* }* %control, { %Array* }** %23, align 8 + %__qsVar1__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__5__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %17) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + %24 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__adj(%Callable* %24, %Callable* %__qsVar1__op__, %Qubit* %target) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %25 = icmp eq i2 %pauli, -1 + br i1 %25, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %26 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 1 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 2 + %32 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 3 + %33 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 4 + store %Callable* %26, %Callable** %29, align 8 + store double %tolerance, double* %30, align 8 + store %Array* %coefficients, %Array** %31, align 8 + store i2 1, i2* %32, align 1 + store { %Array* }* %control, { %Array* }** %33, align 8 + %__qsVar2__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__6__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %27) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + %34 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %34) + call void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__adj(%Callable* %34, %Callable* %__qsVar2__op__, %Qubit* %target) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %34, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %35 = icmp eq i2 %pauli, 0 + br i1 %35, label %then3__1, label %else__1 + +then3__1: ; preds = %test3__1 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__adj(double %tolerance, %Array* %coefficients, { %Array* }* %control) + br label %continue__1 + +else__1: ; preds = %test3__1 + %36 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @1, i32 0, i32 0)) + %37 = icmp eq i2 1, %pauli + br i1 %37, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %38 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @2, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %39 = icmp eq i2 -1, %pauli + br i1 %39, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %40 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @3, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %41 = icmp eq i2 -2, %pauli + br i1 %41, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %42 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @4, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %43 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @5, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %44 = phi %String* [ %42, %condTrue__3 ], [ %43, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %45 = phi %String* [ %40, %condTrue__2 ], [ %44, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %46 = phi %String* [ %38, %condTrue__1 ], [ %45, %condContinue__2 ] + %47 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %46) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + %48 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @6, i32 0, i32 0)) + %49 = call %String* @__quantum__rt__string_concatenate(%String* %47, %String* %48) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %48, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__fail(%String* %49) + unreachable + +continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0) { +entry: + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %pauli = load i2, i2* %3, align 1 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %control = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %target = load %Qubit*, %Qubit** %8, align 8 + %9 = icmp eq i2 %pauli, -2 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Callable*, double, %Array*, { %Array* }* }* + %13 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 2 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 3 + store %Callable* %10, %Callable** %13, align 8 + store double %tolerance, double* %14, align 8 + store %Array* %coefficients, %Array** %15, align 8 + store { %Array* }* %control, { %Array* }** %16, align 8 + %op = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__7__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %11) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %17 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %17, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %17) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, %Qubit* }* + %20 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %20, align 8 + store %Qubit* %target, %Qubit** %21, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %18, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %22 = icmp eq i2 %pauli, 1 + br i1 %22, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %26 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 1 + %28 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 2 + %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 3 + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 4 + store %Callable* %23, %Callable** %26, align 8 + store double %tolerance, double* %27, align 8 + store %Array* %coefficients, %Array** %28, align 8 + store i2 -2, i2* %29, align 1 + store { %Array* }* %control, { %Array* }** %30, align 8 + %op__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* 
@PartialApplication__8__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %24) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 1) + %31 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { %Callable*, %Callable*, %Qubit* }* + %34 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 2 + store %Callable* %31, %Callable** %34, align 8 + store %Callable* %op__1, %Callable** %35, align 8 + store %Qubit* %target, %Qubit** %36, align 8 + call void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__ctl(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %33) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %37 = icmp eq i2 %pauli, -1 + br i1 %37, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %38 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %41 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 0 + %42 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { 
%Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 1 + %43 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 2 + %44 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 3 + %45 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 4 + store %Callable* %38, %Callable** %41, align 8 + store double %tolerance, double* %42, align 8 + store %Array* %coefficients, %Array** %43, align 8 + store i2 1, i2* %44, align 1 + store { %Array* }* %control, { %Array* }** %45, align 8 + %op__2 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__9__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %39) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 1) + %46 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %46) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { %Callable*, %Callable*, %Qubit* }* + %49 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 1 + %51 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 2 + store %Callable* %46, %Callable** %49, align 8 + store %Callable* %op__2, %Callable** %50, align 8 + store %Qubit* %target, %Qubit** %51, align 8 + call void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__ctl(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %48) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %52 = icmp eq i2 %pauli, 0 + br i1 %52, label %then3__1, label %else__1 + +then3__1: ; preds = %test3__1 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %53 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %54 = bitcast %Tuple* %53 to { double, %Array*, { %Array* }* }* + %55 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 0 + %56 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 1 + %57 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 2 + store double %tolerance, double* %55, align 8 + store %Array* %coefficients, %Array** %56, align 8 + store { %Array* }* %control, { %Array* }** %57, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %54) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1) + br label %continue__1 + +else__1: ; preds = %test3__1 + %58 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @1, i32 0, i32 0)) + %59 = icmp eq i2 1, %pauli + br i1 %59, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %60 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @2, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %61 = icmp eq i2 -1, %pauli + br i1 %61, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %62 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @3, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %63 = icmp eq i2 -2, %pauli + br i1 %63, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %64 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @4, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %65 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @5, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %66 = phi %String* [ %64, %condTrue__3 ], [ %65, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %67 = phi %String* [ %62, %condTrue__2 ], [ %66, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %68 = phi %String* [ %60, %condTrue__1 ], [ %67, %condContinue__2 ] + %69 = call %String* @__quantum__rt__string_concatenate(%String* %58, %String* %68) + call void @__quantum__rt__string_update_reference_count(%String* %58, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %68, i32 -1) + %70 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @6, i32 0, i32 0)) + %71 = call %String* @__quantum__rt__string_concatenate(%String* %69, %String* %70) + call void 
@__quantum__rt__string_update_reference_count(%String* %69, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %70, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__fail(%String* %71) + unreachable + +continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %pauli = load i2, i2* %3, align 1 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %control = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %target = load %Qubit*, %Qubit** %8, align 8 + %9 = icmp eq i2 %pauli, -2 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Callable*, double, %Array*, { %Array* }* }* + %13 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, 
i32 0, i32 0 + %14 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 2 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 3 + store %Callable* %10, %Callable** %13, align 8 + store double %tolerance, double* %14, align 8 + store %Array* %coefficients, %Array** %15, align 8 + store { %Array* }* %control, { %Array* }** %16, align 8 + %__qsVar0__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__10__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %11) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %17 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %17) + call void @__quantum__rt__callable_make_controlled(%Callable* %17) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, %Qubit* }* + %20 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %20, align 8 + store %Qubit* %target, %Qubit** %21, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %18, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %22 = icmp eq i2 %pauli, 1 + br i1 %22, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { 
%Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %26 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 1 + %28 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 2 + %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 3 + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 4 + store %Callable* %23, %Callable** %26, align 8 + store double %tolerance, double* %27, align 8 + store %Array* %coefficients, %Array** %28, align 8 + store i2 -2, i2* %29, align 1 + store { %Array* }* %control, { %Array* }** %30, align 8 + %__qsVar1__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__11__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %24) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + %31 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { %Callable*, %Callable*, %Qubit* }* + %34 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 2 + store %Callable* %31, %Callable** %34, align 8 + store %Callable* %__qsVar1__op__, %Callable** %35, align 8 + store %Qubit* %target, %Qubit** %36, align 8 + call void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %33) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, 
i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %37 = icmp eq i2 %pauli, -1 + br i1 %37, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %38 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %41 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 0 + %42 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 1 + %43 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 2 + %44 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 3 + %45 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 4 + store %Callable* %38, %Callable** %41, align 8 + store double %tolerance, double* %42, align 8 + store %Array* %coefficients, %Array** %43, align 8 + store i2 1, i2* %44, align 1 + store { %Array* }* %control, { %Array* }** %45, align 8 + %__qsVar2__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__12__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %39) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + %46 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %46) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { %Callable*, %Callable*, %Qubit* }* + %49 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 1 + %51 = getelementptr inbounds { 
%Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 2 + store %Callable* %46, %Callable** %49, align 8 + store %Callable* %__qsVar2__op__, %Callable** %50, align 8 + store %Qubit* %target, %Qubit** %51, align 8 + call void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %48) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %52 = icmp eq i2 %pauli, 0 + br i1 %52, label %then3__1, label %else__1 + +then3__1: ; preds = %test3__1 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %53 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %54 = bitcast %Tuple* %53 to { double, %Array*, { %Array* }* }* + %55 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 0 + %56 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 1 + %57 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 2 + store double %tolerance, double* %55, align 8 + store %Array* %coefficients, %Array** %56, align 8 + store { %Array* }* %control, { %Array* }** %57, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %54) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1) + br label %continue__1 + +else__1: ; preds = %test3__1 + %58 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @1, i32 0, i32 0)) + %59 = icmp eq i2 1, %pauli + br i1 %59, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %60 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @2, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %61 = icmp eq i2 -1, %pauli + br i1 %61, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %62 = call %String* @__quantum__rt__string_create(i8* getelementptr 
inbounds ([7 x i8], [7 x i8]* @3, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %63 = icmp eq i2 -2, %pauli + br i1 %63, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %64 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @4, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %65 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @5, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %66 = phi %String* [ %64, %condTrue__3 ], [ %65, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %67 = phi %String* [ %62, %condTrue__2 ], [ %66, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %68 = phi %String* [ %60, %condTrue__1 ], [ %67, %condContinue__2 ] + %69 = call %String* @__quantum__rt__string_concatenate(%String* %58, %String* %68) + call void @__quantum__rt__string_update_reference_count(%String* %58, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %68, i32 -1) + %70 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @6, i32 0, i32 0)) + %71 = call %String* @__quantum__rt__string_concatenate(%String* %69, %String* %70) + call void @__quantum__rt__string_update_reference_count(%String* %69, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %70, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__fail(%String* %71) + unreachable + +continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__h__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__h__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__s__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__s__adj(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__s__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__s__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) + +declare void @__quantum__rt__callable_make_controlled(%Callable*) + +define internal void @Lifted__PartialApplication__4__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, 
i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds 
{ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, 
align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__body__wrapper(%Tuple* 
%capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = 
getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr 
inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { 
%Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* 
%27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__adj(%Callable* %outerOperation, %Callable* %innerOperation, %Qubit* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %0 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %0) + call void @__quantum__rt__callable_make_adjoint(%Callable* %0) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Qubit* }* + %3 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %2, i32 0, i32 0 + store %Qubit* %target, %Qubit** %3, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %0, %Tuple* %1, %Tuple* null) + %4 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Qubit* }* + %7 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %6, i32 0, i32 0 + store %Qubit* %target, %Qubit** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %5, %Tuple* null) + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %8) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Qubit* }* + %11 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %10, i32 0, i32 0 + store %Qubit* %target, %Qubit** %11, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %9, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* 
%capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { 
%Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, 
%Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void 
+} + +define internal void @Lifted__PartialApplication__7__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, 
double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__7__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 
= bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__body__wrapper(%Tuple* %capture-tuple, %Tuple* 
%arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { 
%Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, 
double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__8__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 
0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void 
@__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__ctl(%Array* %controlRegister, { %Callable*, %Callable*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %outerOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %innerOperation = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Qubit* }* + %6 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %5, i32 0, i32 0 + store %Qubit* %target, %Qubit** %6, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %outerOperation, %Tuple* %4, %Tuple* null) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Qubit* }* + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Qubit* %target, %Qubit** %11, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %8, %Tuple* null) + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Qubit* }* + %15 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %14, i32 0, i32 0 + store %Qubit* %target, %Qubit** %15, align 8 + call void 
@__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %13, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, 
{ %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, 
i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__9__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + 
%21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { 
%Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + 
%0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void 
@__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* 
%capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { 
%Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, 
%Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___b48fed7f85574cfdb7ddc3494fb7ef83_ApplyWithCA__ctladj(%Array* %controlRegister, { %Callable*, %Callable*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %outerOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %innerOperation = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Qubit* }* + %7 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %6, i32 0, i32 0 + store %Qubit* %target, %Qubit** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %5, %Tuple* null) + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %8) + call void @__quantum__rt__callable_make_adjoint(%Callable* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* 
}, { %Array*, %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Array*, %Qubit* }* + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %10, i32 0, i32 1 + store %Array* %controlRegister, %Array** %11, align 8 + store %Qubit* %target, %Qubit** %12, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %9, %Tuple* null) + %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %13) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Qubit* }* + %16 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %15, i32 0, i32 0 + store %Qubit* %target, %Qubit** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %13, %Tuple* %14, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, 
align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, 
%Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = 
getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__12__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* 
getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { +entry: + %__controlQubits__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__controlQubits__, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, %Qubit** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal i1 @Microsoft__Quantum__Canon__IsRangeEmpty__body(%Range %rng) { +entry: + %0 = extractvalue %Range %rng, 0 + %1 = extractvalue %Range %rng, 1 + %2 = extractvalue %Range %rng, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %3 = icmp sgt i64 %1, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idx = phi i64 [ %0, %preheader__1 ], [ %7, %exiting__1 ] + %4 = icmp sle i64 %idx, %2 + %5 = icmp sge i64 %idx, %2 + %6 = select i1 %3, i1 %4, i1 %5 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + ret i1 false + +exiting__1: ; No predecessors! + %7 = add i64 %idx, %1 + br label %header__1 + +exit__1: ; preds = %header__1 + ret i1 true +} + +define internal %Callable* @Microsoft__Quantum__Canon__MultiplexerBruteForceFromGenerator__body(i64 %0, %Callable* %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %unitaryGenerator = bitcast %Tuple* %2 to { i64, %Callable* }* + %3 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %4 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + store i64 %0, i64* %3, align 4 + store %Callable* %1, %Callable** %4, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %5 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { i64, %Callable* }* }* getelementptr ({ %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Callable*, { i64, %Callable* }* }* + %8 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %7, i32 0, i32 1 + store %Callable* %5, %Callable** %8, align 8 + store { i64, %Callable* }* %unitaryGenerator, { i64, %Callable* }** %9, align 8 + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__13__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__3__FunctionTable, %Tuple* %6) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret %Callable* %10 +} + +define internal void @Lifted__PartialApplication__13__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { { %Array* }*, %Array* }* + %4 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 0 + %5 = load { %Array* }*, { %Array* }** %4, align 8 + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %10 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 2 + store { i64, %Callable* }* %2, { i64, %Callable* }** %10, align 8 + store { %Array* }* %5, { %Array* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__13__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { { %Array* }*, %Array* }* + %4 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 0 + %5 = load { %Array* }*, { %Array* }** %4, align 8 + %6 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %10 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { 
{ i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %9, i32 0, i32 2 + store { i64, %Callable* }* %2, { i64, %Callable* }** %10, align 8 + store { %Array* }* %5, { %Array* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__13__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { %Array* }*, %Array* }*, { { %Array* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %6 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 1 + %7 = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8 + %8 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %9 = load { %Array* }*, { %Array* }** %8, align 8 + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %14 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 2 + store { i64, %Callable* }* %7, { i64, %Callable* }** %14, align 8 + store { %Array* }* %9, { %Array* }** %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to 
{ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, { { i64, %Callable* }*, { %Array* }*, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__13__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { %Array* }*, %Array* }* }, { %Array*, { { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { %Array* }*, %Array* }*, { { %Array* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %6 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 1 + %7 = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8 + %8 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 0 + %9 = load { %Array* }*, { %Array* }** %8, align 8 + %10 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { %Array* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %14 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, i32 0, i32 2 + store { i64, %Callable* }* %7, { i64, %Callable* }** %14, align 8 + store { %Array* }* %9, { %Array* }** %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { i64, %Callable* }*, { %Array* }*, %Array* }* %13, { { i64, %Callable* }*, { %Array* }*, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %23) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load { %Array* }*, { %Array* }** %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__body({ i64, %Callable* }* %4, { %Array* }* %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, { %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, 
{ { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load { %Array* }*, { %Array* }** %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__adj({ i64, %Callable* }* %4, { %Array* }* %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, { %Array* }*, %Array* }*, { { i64, %Callable* }*, { %Array* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__ctl(%Array* %3, { { i64, %Callable* }*, { %Array* }*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, { %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, { %Array* }*, %Array* }*, { { i64, %Callable* }*, { %Array* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__ctladj(%Array* %3, { { i64, %Callable* }*, { %Array* }*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__3__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %4, i32 0, i32 1 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + %7 
= bitcast { i64, %Callable* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__3__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %4, i32 0, i32 1 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + %7 = bitcast { i64, %Callable* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__body({ i64, %Callable* }* %unitaryGenerator, { %Array* }* %index, %Array* %target) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %unitaryFunction = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %1 = bitcast { i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %4 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %nIndex = call i64 @__quantum__rt__array_get_size_1d(%Array* %3) + %5 = trunc i64 %nIndex to i32 + %6 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %5) + %nStates = fptosi double %6 to i64 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %nUnitaries = load i64, i64* %7, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %8 = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %nStates, i64 %nUnitaries) + %9 = sub i64 %8, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxOp = phi i64 [ 0, %entry ], [ %24, %exiting__1 ] + %10 = icmp sle i64 %idxOp, %9 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 
ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i64 }* + %13 = getelementptr inbounds { i64 }, { i64 }* %12, i32 0, i32 0 + store i64 %idxOp, i64* %13, align 4 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %unitaryFunction, %Tuple* %11, %Tuple* %14) + %15 = bitcast %Tuple* %14 to { %Callable* }* + %16 = getelementptr inbounds { %Callable* }, { %Callable* }* %15, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @Microsoft__Quantum__Canon___85d57b1b6d144721a44197efbe3a0778_ControlledOnInt__body(i64 %idxOp, %Callable* %17) + %19 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, %Array* }* + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 1 + store %Array* %19, %Array** %22, align 8 + store %Array* %target, %Array** %23, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %20, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %24 = add i64 %idxOp, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + %25 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__adj({ i64, %Callable* }* %unitaryGenerator, { %Array* }* %index, %Array* %target) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %__qsVar3__unitaryFunction__ = load %Callable*, %Callable** %0, align 
8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + %1 = bitcast { i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %4 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %__qsVar0__nIndex__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %3) + %5 = trunc i64 %__qsVar0__nIndex__ to i32 + %6 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %5) + %__qsVar1__nStates__ = fptosi double %6 to i64 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %__qsVar2__nUnitaries__ = load i64, i64* %7, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + %8 = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %__qsVar1__nStates__, i64 %__qsVar2__nUnitaries__) + %9 = sub i64 %8, 1 + %10 = sub i64 %9, 0 + %11 = sdiv i64 %10, 1 + %12 = mul i64 1, %11 + %13 = add i64 0, %12 + %14 = insertvalue %Range zeroinitializer, i64 %13, 0 + %15 = insertvalue %Range %14, i64 -1, 1 + %16 = insertvalue %Range %15, i64 0, 2 + %17 = extractvalue %Range %16, 0 + %18 = extractvalue %Range %16, 1 + %19 = extractvalue %Range %16, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %20 = icmp sgt i64 %18, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar4__idxOp__ = phi i64 [ %17, %preheader__1 ], [ %38, %exiting__1 ] + %21 = icmp sle i64 %__qsVar4__idxOp__, %19 + %22 = icmp sge i64 %__qsVar4__idxOp__, %19 + %23 = select i1 %20, i1 %21, i1 %22 + br i1 %23, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { i64 }* + %26 = getelementptr inbounds { i64 }, { i64 }* %25, i32 0, i32 0 + store i64 %__qsVar4__idxOp__, i64* %26, align 4 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %__qsVar3__unitaryFunction__, %Tuple* %24, %Tuple* %27) + %28 = bitcast %Tuple* %27 to { %Callable* }* + %29 = getelementptr inbounds { %Callable* }, { %Callable* }* %28, i32 0, i32 0 + %30 = load %Callable*, %Callable** %29, align 8 + %31 = call %Callable* @Microsoft__Quantum__Canon___85d57b1b6d144721a44197efbe3a0778_ControlledOnInt__body(i64 %__qsVar4__idxOp__, %Callable* %30) + %32 = call %Callable* @__quantum__rt__callable_copy(%Callable* %31, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %32) + %33 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %33, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %34 = 
call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %35 = bitcast %Tuple* %34 to { %Array*, %Array* }* + %36 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %35, i32 0, i32 0 + %37 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %35, i32 0, i32 1 + store %Array* %33, %Array** %36, align 8 + store %Array* %target, %Array** %37, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %32, %Tuple* %34, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %33, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %38 = add i64 %__qsVar4__idxOp__, %18 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + %39 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %39, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %unitaryGenerator = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %unitaryFunction = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %3 = bitcast { i64, %Callable* }* %unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, 
i32 0, i32 1 + %index = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %nIndex = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %9 = trunc i64 %nIndex to i32 + %10 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %9) + %nStates = fptosi double %10 to i64 + %11 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %nUnitaries = load i64, i64* %11, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 1) + %12 = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %nStates, i64 %nUnitaries) + %13 = sub i64 %12, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxOp = phi i64 [ 0, %entry ], [ %33, %exiting__1 ] + %14 = icmp sle i64 %idxOp, %13 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { i64 }* + %17 = getelementptr inbounds { i64 }, { i64 }* %16, i32 0, i32 0 + store i64 %idxOp, i64* %17, align 4 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %unitaryFunction, %Tuple* %15, %Tuple* %18) + %19 = bitcast %Tuple* %18 to { %Callable* }* + %20 = getelementptr inbounds { %Callable* }, { %Callable* }* %19, i32 0, i32 0 + %21 = load %Callable*, %Callable** %20, align 8 + %22 = call %Callable* @Microsoft__Quantum__Canon___85d57b1b6d144721a44197efbe3a0778_ControlledOnInt__body(i64 %idxOp, %Callable* %21) + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %24 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { %Array*, %Array* }* + %27 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %26, i32 0, i32 0 + %28 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %26, i32 0, i32 1 + store %Array* %24, %Array** %27, align 8 + store %Array* %target, %Array** %28, align 8 + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, 
%Array* }* }* null, i32 1) to i64)) + %30 = bitcast %Tuple* %29 to { %Array*, { %Array*, %Array* }* }* + %31 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %30, i32 0, i32 0 + %32 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %30, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %31, align 8 + store { %Array*, %Array* }* %26, { %Array*, %Array* }** %32, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %29, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %33 = add i64 %idxOp, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + %34 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unitaryFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unitaryFunction, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___998a3754b25e47f5b433ad1ab9592879_MultiplexOperationsBruteForceFromGenerator__ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 0 + %unitaryGenerator = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 1 + %__qsVar3__unitaryFunction__ = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + %3 = bitcast { i64, %Callable* }* 
%unitaryGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 1 + %index = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %index, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %index to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { { i64, %Callable* }*, { %Array* }*, %Array* }, { { i64, %Callable* }*, { %Array* }*, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %__qsVar0__nIndex__ = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %9 = trunc i64 %__qsVar0__nIndex__ to i32 + %10 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %9) + %__qsVar1__nStates__ = fptosi double %10 to i64 + %11 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %unitaryGenerator, i32 0, i32 0 + %__qsVar2__nUnitaries__ = load i64, i64* %11, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 1) + %12 = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %__qsVar1__nStates__, i64 %__qsVar2__nUnitaries__) + %13 = sub i64 %12, 1 + %14 = sub i64 %13, 0 + %15 = sdiv i64 %14, 1 + %16 = mul i64 1, %15 + %17 = add i64 0, %16 + %18 = insertvalue %Range zeroinitializer, i64 %17, 0 + %19 = insertvalue %Range %18, i64 -1, 1 + %20 = insertvalue %Range %19, i64 0, 2 + %21 = extractvalue %Range %20, 0 + %22 = extractvalue %Range %20, 1 + %23 = extractvalue %Range %20, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %24 = icmp sgt i64 %22, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar4__idxOp__ = phi i64 [ %21, %preheader__1 ], [ %46, %exiting__1 ] + %25 = icmp sle i64 %__qsVar4__idxOp__, %23 + %26 = icmp sge i64 %__qsVar4__idxOp__, %23 + %27 = select i1 %24, i1 %25, i1 %26 + br i1 %27, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { i64 }* + %30 = getelementptr inbounds { i64 }, { i64 }* %29, i32 0, i32 0 + store i64 %__qsVar4__idxOp__, i64* %30, align 4 + %31 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %__qsVar3__unitaryFunction__, %Tuple* %28, %Tuple* %31) + %32 = bitcast %Tuple* %31 to { %Callable* }* + %33 = getelementptr inbounds { %Callable* }, { %Callable* }* %32, i32 0, i32 0 + %34 = load %Callable*, %Callable** %33, align 8 + %35 = call %Callable* @Microsoft__Quantum__Canon___85d57b1b6d144721a44197efbe3a0778_ControlledOnInt__body(i64 %__qsVar4__idxOp__, %Callable* %34) + %36 = call %Callable* @__quantum__rt__callable_copy(%Callable* %35, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %36) + call void 
@__quantum__rt__callable_make_controlled(%Callable* %36) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %37 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %38 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %39 = bitcast %Tuple* %38 to { %Array*, %Array* }* + %40 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 0 + %41 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %39, i32 0, i32 1 + store %Array* %37, %Array** %40, align 8 + store %Array* %target, %Array** %41, align 8 + %42 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %43 = bitcast %Tuple* %42 to { %Array*, { %Array*, %Array* }* }* + %44 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %43, i32 0, i32 0 + %45 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %43, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %44, align 8 + store { %Array*, %Array* }* %39, { %Array*, %Array* }** %45, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %36, %Tuple* %42, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %38, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %46 = add i64 %__qsVar4__idxOp__, %22 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + %47 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__unitaryFunction__, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___79e0da793bac4e01ba7a8549000baf29_ControlledOnInt__body(i64 %numberState, %Callable* %oracle) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Callable* }* getelementptr ({ %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, i64, %Callable* }* + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store i64 %numberState, i64* %4, align 4 + store %Callable* %oracle, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__14__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__4__FunctionTable, %Tuple* %1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + ret %Callable* %6 +} + +define internal void @Lifted__PartialApplication__14__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 1 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Qubit* }* getelementptr ({ i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable*, %Array*, %Qubit* }* + %12 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { 
i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 3 + store i64 %2, i64* %12, align 4 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__14__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 1 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Qubit* }* getelementptr ({ i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable*, %Array*, %Qubit* }* + %12 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 3 + store i64 %2, i64* %12, align 4 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__14__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, 
{ %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Qubit* }*, { %Array*, %Qubit* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Qubit* }* getelementptr ({ i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64, %Callable*, %Array*, %Qubit* }* + %16 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 3 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Qubit* %13, %Qubit** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* getelementptr ({ %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* + %22 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { i64, %Callable*, %Array*, %Qubit* }* %15, { i64, %Callable*, %Array*, %Qubit* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__14__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, 
%Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Qubit* }*, { %Array*, %Qubit* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Qubit* }* getelementptr ({ i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64, %Callable*, %Array*, %Qubit* }* + %16 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 3 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Qubit* %13, %Qubit** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* getelementptr ({ %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* + %22 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { i64, %Callable*, %Array*, %Qubit* }* %15, { i64, %Callable*, %Array*, %Qubit* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable*, %Array*, %Qubit* }* + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__body(i64 %5, %Callable* %6, %Array* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable*, %Array*, %Qubit* }* + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__adj(i64 %5, %Callable* %6, %Array* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, %Callable*, %Array*, %Qubit* }*, { i64, %Callable*, %Array*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__ctl(%Array* %3, { i64, %Callable*, %Array*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { 
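+; Controlled-adjoint wrapper: unpacks the (control qubits, inner argument tuple) pair from %arg-tuple and forwards both to ApplyControlledOnInt__ctladj.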
+entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }, { %Array*, { i64, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, %Callable*, %Array*, %Qubit* }*, { i64, %Callable*, %Array*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__ctladj(%Array* %3, { i64, %Callable*, %Array*, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__4__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__4__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__body(i64 %numberState, %Callable* %oracle, %Array* %controlRegister, %Qubit* %targetRegister) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %bits = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %0) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %1 = call %Callable* @Microsoft__Quantum__Canon___883eb98596ed49bcbde64b1b9d9f4b25_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) + 
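; Invoke the bit-string-controlled oracle on (controlRegister, targetRegister), then release the temporary references. +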
call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %3 = bitcast %Tuple* %2 to { %Array*, %Qubit* }* + %4 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %3, i32 0, i32 0 + %5 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %3, i32 0, i32 1 + store %Array* %controlRegister, %Array** %4, align 8 + store %Qubit* %targetRegister, %Qubit** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %1, %Tuple* %2, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__adj(i64 %numberState, %Callable* %oracle, %Array* %controlRegister, %Qubit* %targetRegister) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %__qsVar0__bits__ = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %0) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 1) + %1 = call %Callable* @Microsoft__Quantum__Canon___883eb98596ed49bcbde64b1b9d9f4b25_ControlledOnBitString__body(%Array* %__qsVar0__bits__, %Callable* %oracle) + %2 = call %Callable* @__quantum__rt__callable_copy(%Callable* %1, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %2) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, %Qubit* }* + %5 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + store %Array* %controlRegister, %Array** %5, align 8 + store %Qubit* %targetRegister, %Qubit** %6, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %2, %Tuple* %3, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__ctl(%Array* %__controlQubits__, { i64, %Callable*, %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %numberState = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %targetRegister = load %Qubit*, %Qubit** %4, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %bits = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %5) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %6 = call %Callable* @Microsoft__Quantum__Canon___883eb98596ed49bcbde64b1b9d9f4b25_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %6, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Qubit* }* + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Qubit* %targetRegister, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Qubit* }* }* + %14 = getelementptr inbounds { 
%Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %14, align 8 + store { %Array*, %Qubit* }* %9, { %Array*, %Qubit* }** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___24a77cb13a0340e9ab81c004c1957748_ApplyControlledOnInt__ctladj(%Array* %__controlQubits__, { i64, %Callable*, %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %numberState = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Qubit* }, { i64, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %targetRegister = load %Qubit*, %Qubit** %4, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %__qsVar0__bits__ = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 1) + %6 = call %Callable* @Microsoft__Quantum__Canon___883eb98596ed49bcbde64b1b9d9f4b25_ControlledOnBitString__body(%Array* %__qsVar0__bits__, %Callable* %oracle) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %6, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %7) + call void 
@__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Qubit* }* + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Qubit* %targetRegister, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %14, align 8 + store { %Array*, %Qubit* }* %9, { %Array*, %Qubit* }** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___85d57b1b6d144721a44197efbe3a0778_ControlledOnInt__body(i64 %numberState, %Callable* %oracle) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Callable* }* getelementptr ({ 
%Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, i64, %Callable* }* + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store i64 %numberState, i64* %4, align 4 + store %Callable* %oracle, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__15__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__5__FunctionTable, %Tuple* %1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + ret %Callable* %6 +} + +define internal void @Lifted__PartialApplication__15__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 1 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Array* }* getelementptr ({ i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable*, %Array*, %Array* }* + %12 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 3 + store i64 %2, i64* %12, align 4 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__15__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, 
i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 1 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Array* }* getelementptr ({ i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable*, %Array*, %Array* }* + %12 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %11, i32 0, i32 3 + store i64 %2, i64* %12, align 4 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__15__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Array* }* getelementptr 
({ i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64, %Callable*, %Array*, %Array* }* + %16 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 3 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Callable*, %Array*, %Array* }* }* getelementptr ({ %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { i64, %Callable*, %Array*, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { i64, %Callable*, %Array*, %Array* }* %15, { i64, %Callable*, %Array*, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__15__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** 
%10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable*, %Array*, %Array* }* getelementptr ({ i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64, %Callable*, %Array*, %Array* }* + %16 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %15, i32 0, i32 3 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, %Callable*, %Array*, %Array* }* }* getelementptr ({ %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { i64, %Callable*, %Array*, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { i64, %Callable*, %Array*, %Array* }* %15, { i64, %Callable*, %Array*, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, 
%Array* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__body(i64 %5, %Callable* %6, %Array* %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__adj(i64 %5, %Callable* %6, %Array* %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, %Callable*, %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, %Callable*, %Array*, %Array* }*, { i64, %Callable*, %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__ctl(%Array* %3, { i64, %Callable*, %Array*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, %Callable*, %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, %Callable*, %Array*, %Array* }* }, { %Array*, { i64, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, %Callable*, %Array*, %Array* }*, { i64, %Callable*, %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__ctladj(%Array* %3, { i64, %Callable*, %Array*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__5__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, 
%Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__5__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__body(i64 %numberState, %Callable* %oracle, %Array* %controlRegister, %Array* %targetRegister) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %bits = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %0) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %1 = call %Callable* @Microsoft__Quantum__Canon___62ea9479a8404884bdf32c0866eaa1a0_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %3 = bitcast %Tuple* %2 to { %Array*, %Array* }* + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + store %Array* %controlRegister, %Array** %4, align 8 + store %Array* %targetRegister, %Array** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %1, %Tuple* %2, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__adj(i64 %numberState, %Callable* %oracle, %Array* %controlRegister, %Array* %targetRegister) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %__qsVar0__bits__ = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %0) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 1) + %1 = call %Callable* @Microsoft__Quantum__Canon___62ea9479a8404884bdf32c0866eaa1a0_ControlledOnBitString__body(%Array* %__qsVar0__bits__, %Callable* %oracle) + %2 = call %Callable* @__quantum__rt__callable_copy(%Callable* %1, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %2) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, %Array* }* + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + store %Array* %controlRegister, %Array** %5, align 8 + store %Array* %targetRegister, %Array** %6, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %2, %Tuple* %3, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__ctl(%Array* %__controlQubits__, { i64, %Callable*, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %numberState = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %targetRegister = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %bits = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %5) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %6 = call %Callable* @Microsoft__Quantum__Canon___62ea9479a8404884bdf32c0866eaa1a0_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %6, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Array* %targetRegister, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 
0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___888ce2eefbc14a36a466783855921224_ApplyControlledOnInt__ctladj(%Array* %__controlQubits__, { i64, %Callable*, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %numberState = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { i64, %Callable*, %Array*, %Array* }, { i64, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %targetRegister = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %__qsVar0__bits__ = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %numberState, i64 %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 1) + %6 = call %Callable* @Microsoft__Quantum__Canon___62ea9479a8404884bdf32c0866eaa1a0_ControlledOnBitString__body(%Array* %__qsVar0__bits__, %Callable* %oracle) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %6, i1 false) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %7) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %controlRegister, %Array** %10, align 8 + store %Array* %targetRegister, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__bits__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____body(%Array* %operations, %Array* %target) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: 
; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %8 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %9 = phi i64 [ 0, %exit__1 ], [ %16, %exiting__2 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %9) + %12 = bitcast i8* %11 to %Callable** + %op = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Array* }* + %15 = getelementptr inbounds { %Array* }, { %Array* }* %14, i32 0, i32 0 + store %Array* %target, %Array** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %13, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %16 = add i64 %9, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %17 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %18 = phi i64 [ 0, %exit__2 ], [ %23, %exiting__3 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %18) + %21 = bitcast i8* %20 to %Callable** + %22 = load %Callable*, %Callable** %21, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %22, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %23 = add i64 %18, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____adj(%Array* %operations, %Array* %target) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %8 = sub i64 %0, 1 + %9 = insertvalue %Range zeroinitializer, i64 %8, 0 + %10 = insertvalue %Range %9, i64 -1, 1 + %11 = insertvalue %Range %10, i64 0, 2 + %12 = call %Array* @__quantum__rt__array_slice_1d(%Array* %operations, %Range %11, i1 true) + %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %12) + %14 = sub i64 %13, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %23, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %15) + %18 = bitcast i8* %17 to %Callable** + %__qsVar0__op__ = load %Callable*, %Callable** %18, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %19 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %19) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array* }* + %22 = getelementptr inbounds { %Array* }, { %Array* }* %21, i32 0, i32 0 + store %Array* %target, %Array** %22, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %19, %Tuple* %20, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %23 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %24 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %25 = phi i64 [ 0, %exit__2 ], [ %30, %exiting__3 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %25) + %28 = bitcast i8* %27 to %Callable** + %29 = load %Callable*, %Callable** %28, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %29, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %30 = add i64 %25, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + ret void +} + +define internal 
void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____ctl(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %operations = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %4) + %7 = bitcast i8* %6 to %Callable** + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %11 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %12) + %15 = bitcast i8* %14 to %Callable** + %op = load %Callable*, %Callable** %15, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %16 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %16, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %16) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, %Array* }* + %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %18, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %19, align 8 + store %Array* %target, %Array** %20, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %16, %Tuple* %17, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %16, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %22 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %23 = phi i64 [ 0, %exit__2 ], [ %28, %exiting__3 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %23) + %26 = bitcast i8* %25 to %Callable** + %27 = load %Callable*, %Callable** %26, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %27, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %28 = add i64 %23, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____ctladj(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %operations = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %4) + %7 = bitcast i8* %6 to %Callable** + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %11 = sub i64 %2, 1 + %12 = insertvalue %Range zeroinitializer, i64 %11, 0 + %13 = insertvalue %Range %12, i64 -1, 1 + %14 = insertvalue %Range %13, i64 0, 2 + %15 = call %Array* @__quantum__rt__array_slice_1d(%Array* %operations, %Range %14, i1 true) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %15) + %17 = sub i64 %16, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %18 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %18) + %21 = bitcast i8* %20 to %Callable** + %__qsVar0__op__ = load %Callable*, %Callable** %21, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, %Array* }* + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %25, align 8 + store %Array* %target, %Array** %26, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %23, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %18, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %28 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %34, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %29) + %32 = bitcast i8* %31 to %Callable** + %33 = load %Callable*, %Callable** %32, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %33, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %34 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__body(%Array* %input) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__adj(%Array* %input) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 -1) + ret void +} + +define 
internal void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__ctl(%Array* %__controlQubits__, %Array* %input) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__ctladj(%Array* %__controlQubits__, %Array* %input) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 -1) + ret void +} + +; MinI: returns the smaller of two Int values. +define internal i64 @Microsoft__Quantum__Math__MinI__body(i64 %a, i64 %b) { +entry: + %0 = icmp slt i64 %a, %b + %1 = select i1 %0, i64 %a, i64 %b + ret i64 %1 +} + +; IndexRange: yields the range 0 .. 1 .. Length(array) - 1 over the indices of %array. +define internal %Range @Microsoft__Quantum__Arrays___cf7bb862dc544cd083b9ebf7b65b7b76_IndexRange__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %2 +} + +; TrotterArbitraryImplCA (body): recursive Trotter-Suzuki decomposition. For order > 2 the step is split into five recursive calls of order - 2 (step sizes outer, outer, inner, outer, outer); order == 2 and order == 1 dispatch to Trotter2ImplCA and Trotter1ImplCA below. +define internal void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____body(i64 %order, { i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = icmp sgt i64 %order, 2 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %stepSizeOuter = call double @Microsoft__Quantum__Canon____QsRef0__TrotterStepSize____body(i64 %order) + %4 = fmul double 4.000000e+00, %stepSizeOuter + %stepSizeInner = fsub double 1.000000e+00, %4 + %5 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { i64, %Callable* }* + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %7, i32 0, i32 1 + store i64 %nSteps, i64* %8, align 4 + store %Callable* %op, %Callable** %9, align 8 + %10 = fmul double %stepSizeOuter, %stepSize + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____body(i64 %5, { i64, %Callable* }* %7, double %10, %Array* %target) + %11 = sub i64 %order, 2 + call void
@__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, %Callable* }* + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1 + store i64 %nSteps, i64* %14, align 4 + store %Callable* %op, %Callable** %15, align 8 + %16 = fmul double %stepSizeOuter, %stepSize + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____body(i64 %11, { i64, %Callable* }* %13, double %16, %Array* %target) + %17 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { i64, %Callable* }* + %20 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %19, i32 0, i32 1 + store i64 %nSteps, i64* %20, align 4 + store %Callable* %op, %Callable** %21, align 8 + %22 = fmul double %stepSizeInner, %stepSize + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____body(i64 %17, { i64, %Callable* }* %19, double %22, %Array* %target) + %23 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { i64, %Callable* }* + %26 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %25, i32 0, i32 1 + store i64 %nSteps, i64* %26, align 4 + store %Callable* %op, %Callable** %27, align 8 + %28 = fmul double %stepSizeOuter, %stepSize + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____body(i64 %23, { i64, %Callable* }* %25, double %28, %Array* %target) + %29 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %31 = bitcast %Tuple* %30 to { i64, %Callable* }* + %32 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %31, i32 0, i32 0 + %33 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %31, i32 0, i32 1 + store i64 %nSteps, i64* %32, align 4 + store %Callable* %op, %Callable** %33, align 8 + %34 = fmul double %stepSizeOuter, %stepSize + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____body(i64 %29, { i64, %Callable* }* %31, double %34, 
%Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %35 = icmp eq i64 %order, 2 + br i1 %35, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { i64, %Callable* }* + %38 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %37, i32 0, i32 1 + store i64 %nSteps, i64* %38, align 4 + store %Callable* %op, %Callable** %39, align 8 + call void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____body({ i64, %Callable* }* %37, double %stepSize, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %continue__1 + +else__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i64, %Callable* }* + %42 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %41, i32 0, i32 1 + store i64 %nSteps, i64* %42, align 4 + store %Callable* %op, %Callable** %43, align 8 + call void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____body({ i64, %Callable* }* %41, double %stepSize, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +; Trotter2ImplCA (body): symmetric second-order Trotter step; a forward sweep over idx = 0 .. nSteps - 1 at stepSize/2, then a reverse sweep at stepSize/2. +define internal void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____body({ i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = sub i64 %nSteps, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %4 = icmp sle i64 %idx, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { i64, double, %Array* }* + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %7, i32 0, i32 1 + %10 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %7, i32 0, i32 2 + store i64 %idx, i64* %8, align 4 + store double %5, double* %9, align 8 + store %Array* %target, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %6, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %12 = sub i64 %nSteps, 1 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idx__1 = phi i64 [ %12, %preheader__1 ], [ %22, %exiting__2 ] + %13 = icmp sle i64 %idx__1, 0 + %14 = icmp sge i64 %idx__1, 0 + ; the reverse sweep steps by the literal -1, so the loop-direction test is folded to the constant false and the sge bound check (%14) decides the loop + %15 = select i1 false, i1 %13, i1 %14 + br i1 %15, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %16 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { i64, double, %Array* }* + %19 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %18, i32 0, i32 1 + %21 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %18,
i32 0, i32 2 + store i64 %idx__1, i64* %19, align 4 + store double %16, double* %20, align 8 + store %Array* %target, %Array** %21, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %17, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %idx__1, -1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +; Trotter1ImplCA (body): first-order Trotter step; invokes op(idx, stepSize, target) once for each idx = 0 .. nSteps - 1. +define internal void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____body({ i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = sub i64 %nSteps, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %4 = icmp sle i64 %idx, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, double, %Array* }* + %7 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %6, i32 0, i32 2 + store i64 %idx, i64* %7, align 4 + store double %stepSize, double* %8, align 8 + store %Array* %target, %Array** %9, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %5, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +; TrotterArbitraryImplCA (adj): adjoint of the recursive decomposition above; same recursion structure, built from adjoint sub-steps. +define internal void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____adj(i64 %order, { i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, {
i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = icmp sgt i64 %order, 2 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %__qsVar0__stepSizeOuter__ = call double @Microsoft__Quantum__Canon____QsRef0__TrotterStepSize____body(i64 %order) + %4 = fmul double 4.000000e+00, %__qsVar0__stepSizeOuter__ + %__qsVar1__stepSizeInner__ = fsub double 1.000000e+00, %4 + %5 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { i64, %Callable* }* + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %7, i32 0, i32 1 + store i64 %nSteps, i64* %8, align 4 + store %Callable* %op, %Callable** %9, align 8 + %10 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____adj(i64 %5, { i64, %Callable* }* %7, double %10, %Array* %target) + %11 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, %Callable* }* + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1 + store i64 %nSteps, i64* %14, align 4 + store %Callable* %op, %Callable** %15, align 8 + %16 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____adj(i64 %11, { i64, %Callable* }* %13, double %16, %Array* %target) + %17 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { i64, %Callable* }* + %20 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %19, i32 0, i32 1 + store i64 %nSteps, i64* %20, align 4 + store %Callable* %op, %Callable** %21, align 8 + %22 = fmul double %__qsVar1__stepSizeInner__, %stepSize + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____adj(i64 %17, { i64, %Callable* }* %19, double %22, %Array* %target) + %23 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %24 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { i64, %Callable* }* + %26 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %25, i32 0, i32 1 + store i64 %nSteps, i64* %26, align 4 + store %Callable* %op, %Callable** %27, align 8 + %28 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____adj(i64 %23, { i64, %Callable* }* %25, double %28, %Array* %target) + %29 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %31 = bitcast %Tuple* %30 to { i64, %Callable* }* + %32 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %31, i32 0, i32 0 + %33 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %31, i32 0, i32 1 + store i64 %nSteps, i64* %32, align 4 + store %Callable* %op, %Callable** %33, align 8 + %34 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____adj(i64 %29, { i64, %Callable* }* %31, double %34, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %35 = icmp eq i64 %order, 2 + br i1 %35, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { i64, %Callable* }* + %38 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %37, i32 
0, i32 1 + store i64 %nSteps, i64* %38, align 4 + store %Callable* %op, %Callable** %39, align 8 + call void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____adj({ i64, %Callable* }* %37, double %stepSize, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %continue__1 + +else__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i64, %Callable* }* + %42 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %41, i32 0, i32 1 + store i64 %nSteps, i64* %42, align 4 + store %Callable* %op, %Callable** %43, align 8 + call void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____adj({ i64, %Callable* }* %41, double %stepSize, %Array* %target) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____adj({ i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = sub i64 %nSteps, 1 + %4 = sub i64 0, %3 + %5 = sdiv i64 %4, -1 + %6 = mul i64 -1, %5 + %7 = add i64 %3, %6 + %8 = insertvalue %Range zeroinitializer, i64 %7, 0 + %9 = insertvalue %Range %8, i64 1, 1 + %10 = insertvalue %Range %9, i64 %3, 2 + %11 = extractvalue %Range %10, 0 + %12 = extractvalue %Range %10, 1 + %13 = extractvalue %Range %10, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %14 = icmp sgt i64 %12, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar1__idx__ = phi i64 [ %11, %preheader__1 ], [ %25, %exiting__1 ] + %15 = icmp sle i64 %__qsVar1__idx__, %13 + %16 = icmp sge i64 %__qsVar1__idx__, %13 + %17 = select i1 %14, i1 %15, i1 %16 + br i1 %17, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + %19 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { i64, double, %Array* }* + %22 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %21, i32 0, i32 2 + store i64 %__qsVar1__idx__, i64* %22, align 4 + store double %19, double* %23, align 8 + store %Array* %target, %Array** %24, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %20, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %25 = add i64 %__qsVar1__idx__, %12 + br label %header__1 + +exit__1: ; preds = %header__1 + %26 = sub i64 %nSteps, 1 + %27 = sub i64 %26, 0 + %28 = sdiv i64 %27, 1 + %29 = mul i64 1, %28 + %30 = add i64 0, %29 + %31 = insertvalue %Range zeroinitializer, i64 %30, 0 + %32 = insertvalue %Range %31, i64 -1, 1 + %33 = insertvalue %Range %32, i64 0, 2 + %34 = extractvalue %Range %33, 0 + %35 = extractvalue %Range %33, 1 + %36 = extractvalue %Range %33, 2 + br label %preheader__2 + +preheader__2: ; preds = %exit__1 + %37 = icmp sgt i64 %35, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__2 + %__qsVar0__idx__ = phi i64 [ %34, %preheader__2 ], [ %48, %exiting__2 ] + %38 = icmp sle i64 %__qsVar0__idx__, %36 + %39 = icmp sge i64 %__qsVar0__idx__, %36 + %40 = select i1 %37, i1 %38, i1 %39 + br i1 %40, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %41 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %41, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %41) + %42 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %43 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %44 = bitcast %Tuple* %43 to { i64, double, %Array* }* + %45 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %44, i32 0, i32 0 + %46 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %44, i32 0, i32 1 + %47 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %44, i32 0, i32 2 + store i64 %__qsVar0__idx__, i64* %45, align 4 + store double %42, double* %46, align 8 + store %Array* %target, %Array** %47, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %41, %Tuple* %43, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %41, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %41, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %48 = add i64 %__qsVar0__idx__, %35 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____adj({ i64, %Callable* }* %0, double %stepSize, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = sub i64 %nSteps, 1 + %4 = sub i64 %3, 0 + %5 = sdiv i64 %4, 1 + %6 = mul i64 1, %5 + %7 = add i64 0, %6 + %8 = insertvalue %Range zeroinitializer, i64 %7, 0 + %9 = insertvalue %Range %8, i64 -1, 1 + %10 = insertvalue %Range %9, i64 0, 2 + %11 = extractvalue %Range %10, 0 + %12 = extractvalue %Range %10, 1 + %13 = extractvalue %Range %10, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %14 = icmp sgt i64 %12, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idx__ = phi i64 [ %11, %preheader__1 ], [ %24, %exiting__1 ] + %15 = icmp sle i64 %__qsVar0__idx__, %13 + %16 = icmp sge i64 %__qsVar0__idx__, %13 + %17 = select i1 %14, i1 %15, i1 %16 + br i1 %17, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { i64, double, %Array* }* + %21 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %20, i32 0, i32 1 + %23 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %20, i32 0, i32 2 + store i64 %__qsVar0__idx__, i64* %21, align 4 + store double %stepSize, double* %22, align 8 + store %Array* %target, %Array** %23, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %19, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + br label %exiting__1 + +exiting__1: ; preds 
= %body__1 + %24 = add i64 %__qsVar0__idx__, %12 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %order = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %4 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %stepSize = load double, double* %4, align 8 + %5 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 3 + %target = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %3, i32 0, i32 0 + %nSteps = load i64, i64* %6, align 4 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %3, i32 0, i32 1 + %op = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %8 = icmp sgt i64 %order, 2 + br i1 %8, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %stepSizeOuter = call double @Microsoft__Quantum__Canon____QsRef0__TrotterStepSize____body(i64 %order) + %9 = fmul double 4.000000e+00, %stepSizeOuter + %stepSizeInner = fsub double 1.000000e+00, %9 + %10 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i64, %Callable* }* + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 1 + store i64 %nSteps, i64* %13, align 4 + store %Callable* %op, %Callable** %14, align 8 + %15 = fmul double %stepSizeOuter, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { i64, { i64, %Callable* }*, double, %Array* }* + %18 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { i64, { i64, 
%Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 3 + store i64 %10, i64* %18, align 4 + store { i64, %Callable* }* %12, { i64, %Callable* }** %19, align 8 + store double %15, double* %20, align 8 + store %Array* %target, %Array** %21, align 8 + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %17) + %22 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { i64, %Callable* }* + %25 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 1 + store i64 %nSteps, i64* %25, align 4 + store %Callable* %op, %Callable** %26, align 8 + %27 = fmul double %stepSizeOuter, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { i64, { i64, %Callable* }*, double, %Array* }* + %30 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 1 + %32 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 2 + %33 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 3 + store i64 %22, i64* %30, align 4 + store { i64, %Callable* }* %24, { i64, %Callable* }** %31, align 8 + store double %27, double* %32, align 8 + store %Array* %target, %Array** %33, align 8 + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %29) + %34 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %35 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %36 = bitcast %Tuple* %35 to { i64, %Callable* }* + %37 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %36, i32 0, i32 0 + %38 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %36, i32 0, i32 1 + store i64 %nSteps, i64* %37, align 4 + store %Callable* %op, %Callable** %38, align 8 + %39 
= fmul double %stepSizeInner, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i64, { i64, %Callable* }*, double, %Array* }* + %42 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 1 + %44 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 2 + %45 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 3 + store i64 %34, i64* %42, align 4 + store { i64, %Callable* }* %36, { i64, %Callable* }** %43, align 8 + store double %39, double* %44, align 8 + store %Array* %target, %Array** %45, align 8 + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %41) + %46 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { i64, %Callable* }* + %49 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %48, i32 0, i32 1 + store i64 %nSteps, i64* %49, align 4 + store %Callable* %op, %Callable** %50, align 8 + %51 = fmul double %stepSizeOuter, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %52 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %53 = bitcast %Tuple* %52 to { i64, { i64, %Callable* }*, double, %Array* }* + %54 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 0 + %55 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 1 + %56 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 2 + %57 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 3 + store i64 %46, i64* %54, align 4 + store { i64, %Callable* }* %48, { i64, %Callable* }** %55, align 8 + store double %51, double* %56, align 8 + store %Array* %target, %Array** %57, align 8 + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %53) + %58 = sub i64 %order, 2 + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %59 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %60 = bitcast %Tuple* %59 to { i64, %Callable* }* + %61 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %60, i32 0, i32 0 + %62 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %60, i32 0, i32 1 + store i64 %nSteps, i64* %61, align 4 + store %Callable* %op, %Callable** %62, align 8 + %63 = fmul double %stepSizeOuter, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %64 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %65 = bitcast %Tuple* %64 to { i64, { i64, %Callable* }*, double, %Array* }* + %66 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 0 + %67 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 1 + %68 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 2 + %69 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 3 + store i64 %58, i64* %66, align 4 + store { i64, %Callable* }* %60, { i64, %Callable* }** %67, align 8 + store double %63, double* %68, align 8 + store %Array* %target, %Array** %69, align 8 + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctl(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %65) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %59, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %64, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %70 = icmp eq i64 %order, 2 + br i1 %70, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %71 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %72 = bitcast %Tuple* %71 to { i64, %Callable* }* + %73 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %72, i32 0, i32 0 + %74 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %72, i32 0, i32 1 + store i64 %nSteps, i64* %73, align 4 + store %Callable* %op, %Callable** %74, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %75 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %76 = bitcast %Tuple* %75 to { { i64, %Callable* }*, double, %Array* }* + %77 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 0 + %78 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 1 + %79 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 2 + store { i64, %Callable* }* %72, { i64, %Callable* }** %77, align 8 + store double %stepSize, double* %78, align 8 + store %Array* %target, %Array** %79, align 8 + call void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %76) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %71, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %75, i32 -1) + br label %continue__1 + +else__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %80 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %81 = bitcast %Tuple* %80 to { i64, %Callable* }* + %82 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %81, i32 0, i32 0 + %83 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %81, i32 0, i32 1 + store i64 %nSteps, i64* 
%82, align 4 + store %Callable* %op, %Callable** %83, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %84 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %85 = bitcast %Tuple* %84 to { { i64, %Callable* }*, double, %Array* }* + %86 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 0 + %87 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 1 + %88 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 2 + store { i64, %Callable* }* %81, { i64, %Callable* }** %86, align 8 + store double %stepSize, double* %87, align 8 + store %Array* %target, %Array** %88, align 8 + call void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %85) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %3, align 8 + %4 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 0 + %nSteps = load i64, i64* %5, align 4 + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 1 + %op = load %Callable*, %Callable** %6, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %7 = sub i64 %nSteps, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] + %8 = icmp sle i64 %idx, %7 + br 
i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %9) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %10 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i64, double, %Array* }* + %13 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %12, i32 0, i32 2 + store i64 %idx, i64* %13, align 4 + store double %10, double* %14, align 8 + store %Array* %target, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { i64, double, %Array* }* }* + %18 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { i64, double, %Array* }* %12, { i64, double, %Array* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %16, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %21 = sub i64 %nSteps, 1 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idx__1 = phi i64 [ %21, %preheader__1 ], [ %36, %exiting__2 ] + %22 = icmp sle i64 %idx__1, 0 + %23 = icmp sge i64 %idx__1, 0 + %24 = select i1 false, i1 %22, i1 %23 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %25, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %25) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %26 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, 
double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { i64, double, %Array* }* + %29 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %28, i32 0, i32 1 + %31 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %28, i32 0, i32 2 + store i64 %idx__1, i64* %29, align 4 + store double %26, double* %30, align 8 + store %Array* %target, %Array** %31, align 8 + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { %Array*, { i64, double, %Array* }* }* + %34 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %33, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %34, align 8 + store { i64, double, %Array* }* %28, { i64, double, %Array* }** %35, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %25, %Tuple* %32, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %25, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %25, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %36 = add i64 %idx__1, -1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %3, align 8 + %4 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 0 + %nSteps = load i64, i64* %5, align 4 + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 1 + %op = load %Callable*, %Callable** %6, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %7 = sub i64 %nSteps, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %19, %exiting__1 ] + %8 = icmp sle i64 %idx, %7 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %9) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, double, %Array* }* + %12 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %11, i32 0, i32 2 + store i64 %idx, i64* %12, align 4 + store double %stepSize, double* %13, align 8 + store %Array* %target, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { i64, double, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %16, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %17, align 8 + store { i64, double, %Array* }* %11, { i64, double, %Array* }** %18, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %15, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, 
{ i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %order = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %4 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %stepSize = load double, double* %4, align 8 + %5 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 3 + %target = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %3, i32 0, i32 0 + %nSteps = load i64, i64* %6, align 4 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %3, i32 0, i32 1 + %op = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %8 = icmp sgt i64 %order, 2 + br i1 %8, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %__qsVar0__stepSizeOuter__ = call double @Microsoft__Quantum__Canon____QsRef0__TrotterStepSize____body(i64 %order) + %9 = fmul double 4.000000e+00, %__qsVar0__stepSizeOuter__ + %__qsVar1__stepSizeInner__ = fsub double 1.000000e+00, %9 + %10 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i64, %Callable* }* + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 1 + store i64 %nSteps, i64* %13, align 4 + store %Callable* %op, %Callable** %14, align 8 + %15 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { i64, { i64, %Callable* }*, double, %Array* }* + %18 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 3 + store i64 %10, i64* %18, align 4 + store { i64, %Callable* }* %12, { i64, %Callable* }** %19, align 8 + store double %15, double* %20, align 8 + store %Array* %target, %Array** %21, align 8 + call void 
@Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %17) + %22 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { i64, %Callable* }* + %25 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 1 + store i64 %nSteps, i64* %25, align 4 + store %Callable* %op, %Callable** %26, align 8 + %27 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { i64, { i64, %Callable* }*, double, %Array* }* + %30 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 1 + %32 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 2 + %33 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %29, i32 0, i32 3 + store i64 %22, i64* %30, align 4 + store { i64, %Callable* }* %24, { i64, %Callable* }** %31, align 8 + store double %27, double* %32, align 8 + store %Array* %target, %Array** %33, align 8 + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %29) + %34 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %35 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %36 = bitcast %Tuple* %35 to { i64, %Callable* }* + %37 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %36, i32 0, i32 0 + %38 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %36, i32 0, i32 1 + store i64 %nSteps, i64* %37, align 4 + store %Callable* %op, %Callable** %38, align 8 + %39 = fmul double %__qsVar1__stepSizeInner__, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i64, { i64, %Callable* }*, double, %Array* }* + %42 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, 
%Array* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 1 + %44 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 2 + %45 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %41, i32 0, i32 3 + store i64 %34, i64* %42, align 4 + store { i64, %Callable* }* %36, { i64, %Callable* }** %43, align 8 + store double %39, double* %44, align 8 + store %Array* %target, %Array** %45, align 8 + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %41) + %46 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { i64, %Callable* }* + %49 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %48, i32 0, i32 1 + store i64 %nSteps, i64* %49, align 4 + store %Callable* %op, %Callable** %50, align 8 + %51 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %52 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %53 = bitcast %Tuple* %52 to { i64, { i64, %Callable* }*, double, %Array* }* + %54 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 0 + %55 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 1 + %56 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 2 + %57 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %53, i32 0, i32 3 + store i64 %46, i64* %54, align 4 + store { i64, %Callable* }* %48, { i64, %Callable* }** %55, align 8 + store double %51, double* %56, align 8 + store %Array* %target, %Array** %57, align 8 + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %53) + %58 = sub i64 %order, 2 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %59 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %60 = bitcast %Tuple* %59 to { i64, %Callable* }* + %61 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %60, i32 0, i32 0 + %62 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %60, i32 0, i32 1 + 
store i64 %nSteps, i64* %61, align 4 + store %Callable* %op, %Callable** %62, align 8 + %63 = fmul double %__qsVar0__stepSizeOuter__, %stepSize + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %64 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %65 = bitcast %Tuple* %64 to { i64, { i64, %Callable* }*, double, %Array* }* + %66 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 0 + %67 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 1 + %68 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 2 + %69 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %65, i32 0, i32 3 + store i64 %58, i64* %66, align 4 + store { i64, %Callable* }* %60, { i64, %Callable* }** %67, align 8 + store double %63, double* %68, align 8 + store %Array* %target, %Array** %69, align 8 + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctladj(%Array* %__controlQubits__, { i64, { i64, %Callable* }*, double, %Array* }* %65) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %59, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %64, i32 -1) + br label 
%continue__1 + +test1__1: ; preds = %entry + %70 = icmp eq i64 %order, 2 + br i1 %70, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %71 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %72 = bitcast %Tuple* %71 to { i64, %Callable* }* + %73 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %72, i32 0, i32 0 + %74 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %72, i32 0, i32 1 + store i64 %nSteps, i64* %73, align 4 + store %Callable* %op, %Callable** %74, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %75 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %76 = bitcast %Tuple* %75 to { { i64, %Callable* }*, double, %Array* }* + %77 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 0 + %78 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 1 + %79 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %76, i32 0, i32 2 + store { i64, %Callable* }* %72, { i64, %Callable* }** %77, align 8 + store double %stepSize, double* %78, align 8 + store %Array* %target, %Array** %79, align 8 + call void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %76) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %71, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %75, i32 -1) + br label %continue__1 + +else__1: ; preds = %test1__1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %80 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %81 = bitcast %Tuple* %80 to { i64, %Callable* }* + %82 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %81, i32 0, i32 0 + %83 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %81, i32 0, i32 1 + store i64 %nSteps, i64* %82, align 4 + store %Callable* %op, %Callable** %83, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %84 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %85 = bitcast %Tuple* %84 to { { i64, %Callable* }*, double, %Array* }* + %86 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* 
%85, i32 0, i32 0 + %87 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 1 + %88 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %85, i32 0, i32 2 + store { i64, %Callable* }* %81, { i64, %Callable* }** %86, align 8 + store double %stepSize, double* %87, align 8 + store %Array* %target, %Array** %88, align 8 + call void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %85) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %84, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %3, align 8 + %4 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 0 + %nSteps = load i64, i64* %5, align 4 + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 1 + %op = load %Callable*, %Callable** %6, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %7 = sub i64 %nSteps, 1 + %8 = sub i64 0, %7 + %9 = sdiv i64 %8, -1 + %10 = mul i64 -1, %9 + %11 = add i64 %7, %10 + %12 = insertvalue %Range zeroinitializer, i64 %11, 0 + %13 = insertvalue %Range %12, i64 1, 1 + %14 = insertvalue %Range %13, i64 %7, 2 + %15 = extractvalue %Range %14, 0 + %16 = extractvalue %Range %14, 1 + %17 = extractvalue %Range %14, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %18 = icmp sgt i64 %16, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar1__idx__ = phi i64 [ %15, %preheader__1 ], [ %33, %exiting__1 ] + %19 = icmp sle i64 %__qsVar1__idx__, %17 + %20 = icmp sge i64 %__qsVar1__idx__, %17 + %21 = select i1 %18, i1 %19, i1 %20 + br i1 %21, label 
%body__1, label %exit__1 + +body__1: ; preds = %header__1 + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %23 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { i64, double, %Array* }* + %26 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %25, i32 0, i32 1 + %28 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %25, i32 0, i32 2 + store i64 %__qsVar1__idx__, i64* %26, align 4 + store double %23, double* %27, align 8 + store %Array* %target, %Array** %28, align 8 + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %30 = bitcast %Tuple* %29 to { %Array*, { i64, double, %Array* }* }* + %31 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %30, i32 0, i32 0 + %32 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %30, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %31, align 8 + store { i64, double, %Array* }* %25, { i64, double, %Array* }** %32, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %29, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %33 = add i64 %__qsVar1__idx__, %16 + br label %header__1 + +exit__1: ; preds = %header__1 + %34 = sub i64 %nSteps, 1 + %35 = sub i64 %34, 0 + %36 = sdiv i64 %35, 1 + %37 = mul i64 1, %36 + %38 = add i64 0, %37 + %39 = insertvalue %Range zeroinitializer, i64 %38, 0 + %40 = insertvalue %Range %39, i64 -1, 1 + %41 = insertvalue %Range %40, i64 0, 2 + %42 = extractvalue %Range %41, 0 + %43 = extractvalue %Range %41, 1 + %44 = extractvalue %Range %41, 2 + br label %preheader__2 + +preheader__2: ; preds = %exit__1 + %45 = icmp sgt i64 %43, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__2 + %__qsVar0__idx__ = phi i64 [ %42, %preheader__2 ], [ %60, %exiting__2 ] + %46 = icmp sle i64 %__qsVar0__idx__, %44 + %47 = icmp sge i64 %__qsVar0__idx__, %44 + %48 = select i1 %45, i1 %46, i1 %47 + br i1 %48, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %49 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + 
call void @__quantum__rt__capture_update_reference_count(%Callable* %49, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %49) + call void @__quantum__rt__callable_make_controlled(%Callable* %49) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %50 = fmul double %stepSize, 5.000000e-01 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %51 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %52 = bitcast %Tuple* %51 to { i64, double, %Array* }* + %53 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %52, i32 0, i32 0 + %54 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %52, i32 0, i32 1 + %55 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %52, i32 0, i32 2 + store i64 %__qsVar0__idx__, i64* %53, align 4 + store double %50, double* %54, align 8 + store %Array* %target, %Array** %55, align 8 + %56 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %57 = bitcast %Tuple* %56 to { %Array*, { i64, double, %Array* }* }* + %58 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %57, i32 0, i32 0 + %59 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %57, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %58, align 8 + store { i64, double, %Array* }* %52, { i64, double, %Array* }** %59, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %49, %Tuple* %56, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %49, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %49, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %51, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %60 = add i64 %__qsVar0__idx__, %43 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %3, align 8 + %4 = 
getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %target = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 0 + %nSteps = load i64, i64* %5, align 4 + %6 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %2, i32 0, i32 1 + %op = load %Callable*, %Callable** %6, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %7 = sub i64 %nSteps, 1 + %8 = sub i64 %7, 0 + %9 = sdiv i64 %8, 1 + %10 = mul i64 1, %9 + %11 = add i64 0, %10 + %12 = insertvalue %Range zeroinitializer, i64 %11, 0 + %13 = insertvalue %Range %12, i64 -1, 1 + %14 = insertvalue %Range %13, i64 0, 2 + %15 = extractvalue %Range %14, 0 + %16 = extractvalue %Range %14, 1 + %17 = extractvalue %Range %14, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %18 = icmp sgt i64 %16, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idx__ = phi i64 [ %15, %preheader__1 ], [ %32, %exiting__1 ] + %19 = icmp sle i64 %__qsVar0__idx__, %17 + %20 = icmp sge i64 %__qsVar0__idx__, %17 + %21 = select i1 %18, i1 %19, i1 %20 + br i1 %21, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, %Array* }* getelementptr ({ i64, double, %Array* }, { i64, double, %Array* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { i64, double, %Array* }* + %25 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %24, i32 0, i32 1 + %27 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %24, i32 0, i32 2 + store i64 %__qsVar0__idx__, i64* %25, align 4 + store double %stepSize, double* %26, align 8 + store %Array* %target, %Array** %27, align 8 + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, double, %Array* }* }* getelementptr ({ %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { %Array*, { i64, double, %Array* }* }* + %30 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %29, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %30, align 8 + store { i64, double, %Array* }* %24, { i64, double, %Array* }** %31, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %28, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 
-1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %32 = add i64 %__qsVar0__idx__, %16 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__body(%Array* %bits, %Callable* %oracle, %Array* %controlRegister, %Qubit* %targetRegister) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %2 = icmp sle i64 %0, %1 + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @7, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %3) + %4 = sub i64 %0, 1 + %5 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %4, 2 + %controlSubregister = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %5, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Qubit* }* + %9 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %8, i32 0, i32 1 + store %Array* %controlSubregister, %Array** %9, align 8 + store %Qubit* %targetRegister, %Qubit** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %actual, %String* %message) { +entry: + %0 = xor i1 %actual, true + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__string_update_reference_count(%String* %message, i32 1) + call void @__quantum__rt__fail(%String* %message) + unreachable + +continue__1: ; preds = %entry + ret void +} + +define internal void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__adj(%Array* %bits, %Callable* %oracle, %Array* %controlRegister, %Qubit* %targetRegister) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %2 = icmp sle i64 %0, %1 + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @7, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %3) + %4 = sub i64 %0, 1 + %5 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %4, 2 + %__qsVar0__controlSubregister__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %5, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Qubit* }* + %9 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %8, i32 0, i32 1 + store %Array* %__qsVar0__controlSubregister__, %Array** %9, align 8 + store %Qubit* %targetRegister, %Qubit** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + call void 
@__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__ctl(%Array* %__controlQubits__, { %Array*, %Callable*, %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %bits = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %targetRegister = load %Qubit*, %Qubit** %4, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %7 = icmp sle i64 %5, %6 + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @7, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %7, %String* %8) + %9 = sub i64 %5, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %controlSubregister = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %10, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Qubit* }* + %14 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 1 + store %Array* %controlSubregister, %Array** %14, align 8 + store %Qubit* %targetRegister, %Qubit** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { %Array*, %Qubit* }* }* + %18 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { %Array*, %Qubit* }* %13, { %Array*, %Qubit* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %16, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__ctladj(%Array* %__controlQubits__, { %Array*, %Callable*, %Array*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %bits = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* 
%oracle, i32 1) + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %targetRegister = load %Qubit*, %Qubit** %4, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %7 = icmp sle i64 %5, %6 + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @7, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %7, %String* %8) + %9 = sub i64 %5, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %__qsVar0__controlSubregister__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %10, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_adjoint(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Qubit* }* + %14 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %13, i32 0, i32 1 + store %Array* %__qsVar0__controlSubregister__, %Array** %14, align 8 + store %Qubit* %targetRegister, %Qubit** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { %Array*, %Qubit* }* }* + %18 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { %Array*, %Qubit* }* %13, { %Array*, %Qubit* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %16, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__body(%Array* %bits, %Callable* %oracle, %Array* %controlRegister, %Array* %targetRegister) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %2 = icmp sle i64 %0, %1 + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @7, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %3) + %4 = sub i64 %0, 1 + %5 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %4, 2 + %controlSubregister = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %5, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %controlSubregister, %Array** %9, align 8 + store %Array* %targetRegister, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + call void 
@__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__adj(%Array* %bits, %Callable* %oracle, %Array* %controlRegister, %Array* %targetRegister) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %2 = icmp sle i64 %0, %1 + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @7, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %3) + %4 = sub i64 %0, 1 + %5 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %4, 2 + %__qsVar0__controlSubregister__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %5, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %__qsVar0__controlSubregister__, %Array** %9, align 8 + store %Array* %targetRegister, %Array** 
%10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__ctl(%Array* %__controlQubits__, { %Array*, %Callable*, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %bits = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %targetRegister = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %7 = icmp sle i64 %5, %6 + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @7, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %7, %String* %8) + %9 = sub i64 %5, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %controlSubregister = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %10, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 1) + call void 
@Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Array* }* + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 1 + store %Array* %controlSubregister, %Array** %14, align 8 + store %Array* %targetRegister, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { %Array*, %Array* }* }* + %18 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { %Array*, %Array* }* %13, { %Array*, %Array* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %16, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %controlSubregister) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlSubregister, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__ctladj(%Array* %__controlQubits__, { %Array*, %Callable*, %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %bits = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %oracle = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %controlRegister = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %targetRegister = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 1) + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %controlRegister) + %7 = icmp sle i64 %5, %6 + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @7, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %7, %String* %8) + %9 = sub i64 %5, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %__qsVar0__controlSubregister__ = call %Array* @__quantum__rt__array_slice_1d(%Array* %controlRegister, %Range %10, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__body(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %oracle, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__callable_make_adjoint(%Callable* %11) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Array* }* + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %13, i32 0, i32 1 + store %Array* %__qsVar0__controlSubregister__, %Array** %14, align 8 + store %Array* %targetRegister, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, 
{ %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array*, { %Array*, %Array* }* }* + %18 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %17, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %18, align 8 + store { %Array*, %Array* }* %13, { %Array*, %Array* }** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %16, %Tuple* null) + call void @Microsoft__Quantum__Canon__ApplyPauliFromBitString__adj(i2 1, i1 false, %Array* %bits, %Array* %__qsVar0__controlSubregister__) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__controlSubregister__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %targetRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal { double, double }* @Microsoft__Quantum__Canon___516334c53dfb4d4b89cd46336a852347___QsRef0__ComposedOutput____body(%Callable* %outer, %Callable* %inner, double %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outer, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outer, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inner, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inner, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { double }* + %2 = getelementptr inbounds { double }, { double }* %1, i32 0, i32 0 + store double %target, double* %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %inner, %Tuple* %0, %Tuple* %3) + %4 = bitcast %Tuple* %3 to { double }* + %5 = getelementptr inbounds { double }, { double }* %4, i32 0, i32 0 + %6 = load double, double* %5, align 8 + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { double }* + %9 = 
getelementptr inbounds { double }, { double }* %8, i32 0, i32 0 + store double %6, double* %9, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }* }* getelementptr ({ { double, double }* }, { { double, double }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %outer, %Tuple* %7, %Tuple* %10) + %11 = bitcast %Tuple* %10 to { { double, double }* }* + %12 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %11, i32 0, i32 0 + %13 = load { double, double }*, { double, double }** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %outer, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outer, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inner, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inner, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret { double, double }* %13 +} + +define internal %Callable* @Microsoft__Quantum__Canon___883eb98596ed49bcbde64b1b9d9f4b25_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Callable* }* getelementptr ({ %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Array*, %Callable* }* + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store %Array* %bits, %Array** %4, align 8 + store %Callable* %oracle, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__16__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__6__FunctionTable, %Tuple* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + ret %Callable* %6 +} + +define internal void @Lifted__PartialApplication__16__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* 
%result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 1 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Qubit* }* getelementptr ({ %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Callable*, %Array*, %Qubit* }* + %12 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 3 + store %Array* %2, %Array** %12, align 8 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__16__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %6 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %5, i32 0, i32 1 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Qubit* }* getelementptr ({ %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Callable*, %Array*, %Qubit* }* + %12 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, 
%Callable*, %Array*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %11, i32 0, i32 3 + store %Array* %2, %Array** %12, align 8 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__16__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Qubit* }*, { %Array*, %Qubit* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Qubit* }* getelementptr ({ %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Callable*, %Array*, %Qubit* }* + %16 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 3 + store %Array* %7, %Array** %16, align 8 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Qubit* %13, %Qubit** %19, align 8 + %20 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* + %22 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { %Array*, %Callable*, %Array*, %Qubit* }* %15, { %Array*, %Callable*, %Array*, %Qubit* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__16__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Qubit* }* }, { %Array*, { %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Qubit* }*, { %Array*, %Qubit* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %4, i32 0, i32 1 + %13 = load %Qubit*, %Qubit** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Qubit* }* getelementptr ({ %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Callable*, %Array*, %Qubit* }* + %16 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Array*, %Callable*, %Array*, 
%Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %15, i32 0, i32 3 + store %Array* %7, %Array** %16, align 8 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Qubit* %13, %Qubit** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* getelementptr ({ %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* + %22 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { %Array*, %Callable*, %Array*, %Qubit* }* %15, { %Array*, %Callable*, %Array*, %Qubit* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Callable*, %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %5 = load %Array*, %Array** %1, align 8 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__body(%Array* %5, %Callable* %6, %Array* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Callable*, 
%Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Qubit* }, { %Array*, %Callable*, %Array*, %Qubit* }* %0, i32 0, i32 3 + %5 = load %Array*, %Array** %1, align 8 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__adj(%Array* %5, %Callable* %6, %Array* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Callable*, %Array*, %Qubit* }*, { %Array*, %Callable*, %Array*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__ctl(%Array* %3, { %Array*, %Callable*, %Array*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Callable*, %Array*, %Qubit* }*, { %Array*, %Callable*, %Array*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon___91d3c161bae34a1f91ade5994f249c68_ApplyControlledOnBitString__ctladj(%Array* %3, { %Array*, %Callable*, %Array*, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__6__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__6__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___62ea9479a8404884bdf32c0866eaa1a0_ControlledOnBitString__body(%Array* %bits, %Callable* %oracle) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %bits, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Callable* }* getelementptr ({ %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Array*, %Callable* }* + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store %Array* %bits, %Array** %4, align 8 + store %Callable* %oracle, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, 
%Tuple*, %Tuple*)*]* @PartialApplication__17__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__7__FunctionTable, %Tuple* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + ret %Callable* %6 +} + +define internal void @Lifted__PartialApplication__17__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 1 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Array* }* getelementptr ({ %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Callable*, %Array*, %Array* }* + %12 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 3 + store %Array* %2, %Array** %12, align 8 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__17__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %5, i32 0, i32 1 + %9 = load %Array*, %Array** %8, align 8 + %10 = 
call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Array* }* getelementptr ({ %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Callable*, %Array*, %Array* }* + %12 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %11, i32 0, i32 3 + store %Array* %2, %Array** %12, align 8 + store %Callable* %4, %Callable** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__17__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Array* }* getelementptr ({ %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Callable*, %Array*, %Array* }* + %16 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, 
%Array*, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 3 + store %Array* %7, %Array** %16, align 8 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { %Array*, %Callable*, %Array*, %Array* }* %15, { %Array*, %Callable*, %Array*, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__17__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Callable*, %Array*, %Array* }* getelementptr ({ %Array*, 
%Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Callable*, %Array*, %Array* }* + %16 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %15, i32 0, i32 3 + store %Array* %7, %Array** %16, align 8 + store %Callable* %9, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { %Array*, %Callable*, %Array*, %Array* }* %15, { %Array*, %Callable*, %Array*, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %5 = load %Array*, %Array** %1, align 8 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load 
%Array*, %Array** %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__body(%Array* %5, %Callable* %6, %Array* %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { %Array*, %Callable*, %Array*, %Array* }, { %Array*, %Callable*, %Array*, %Array* }* %0, i32 0, i32 3 + %5 = load %Array*, %Array** %1, align 8 + %6 = load %Callable*, %Callable** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__adj(%Array* %5, %Callable* %6, %Array* %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Callable*, %Array*, %Array* }*, { %Array*, %Callable*, %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__ctl(%Array* %3, { %Array*, %Callable*, %Array*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }, { %Array*, { %Array*, %Callable*, %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Callable*, %Array*, %Array* }*, { %Array*, %Callable*, %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___f447d441ad6d45e8a9c60bc47d24346d_ApplyControlledOnBitString__ctladj(%Array* %3, { %Array*, %Callable*, %Array*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__7__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, 
%Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__7__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Callable* }, { %Callable*, %Array*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___aa681116ffc3482eb00c223eb7ada15f_Compose__body(%Callable* %outer, %Callable* %inner) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outer, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outer, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inner, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inner, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___516334c53dfb4d4b89cd46336a852347___QsRef0__ComposedOutput____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %outer, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %outer, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %inner, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %inner, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* null, i32 
1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Callable*, %Callable* }* + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store %Callable* %outer, %Callable** %4, align 8 + store %Callable* %inner, %Callable** %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__18__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__8__FunctionTable, %Tuple* %1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outer, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outer, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %inner, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inner, i32 -1) + ret %Callable* %6 +} + +define internal void @Lifted__PartialApplication__18__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { double }* + %6 = getelementptr inbounds { double }, { double }* %5, i32 0, i32 0 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, double }* getelementptr ({ %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, %Callable*, double }* + %10 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %9, i32 0, i32 2 + store %Callable* %2, %Callable** %10, align 8 + store %Callable* %4, %Callable** %11, align 8 + store double %7, double* %12, align 8 + %13 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___516334c53dfb4d4b89cd46336a852347___QsRef0__ComposedOutput____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %0, 
i32 0, i32 1 + %3 = getelementptr inbounds { %Callable*, %Callable*, double }, { %Callable*, %Callable*, double }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %1, align 8 + %5 = load %Callable*, %Callable** %2, align 8 + %6 = load double, double* %3, align 8 + %7 = call { double, double }* @Microsoft__Quantum__Canon___516334c53dfb4d4b89cd46336a852347___QsRef0__ComposedOutput____body(%Callable* %4, %Callable* %5, double %6) + %8 = bitcast %Tuple* %result-tuple to { { double, double }* }* + %9 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %8, i32 0, i32 0 + store { double, double }* %7, { double, double }** %9, align 8 + ret void +} + +define internal void @MemoryManagement__8__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__8__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Callable*, %Callable* }, { %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %number, i64 %bits) { +entry: + %tempInt = 
alloca i64, align 8 + %outputBits = alloca %Array*, align 8 + %0 = icmp sge i64 %bits, 0 + br i1 %0, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %entry + %1 = icmp sle i64 %bits, 63 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %entry + %2 = phi i1 [ %1, %condTrue__1 ], [ %0, %entry ] + %3 = trunc i64 %bits to i32 + %4 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %3) + %5 = fptosi double %4 to i64 + %6 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([33 x i8], [33 x i8]* @19, i32 0, i32 0)) + %7 = call %String* @__quantum__rt__int_to_string(i64 %5) + %8 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %7) + call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %8) + %9 = icmp slt i64 %bits, 63 + br i1 %9, label %condTrue__2, label %condFalse__1 + +condTrue__2: ; preds = %condContinue__1 + %10 = shl i64 1, %bits + br label %condContinue__2 + +condFalse__1: ; preds = %condContinue__1 + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__1, %condTrue__2 + %max = phi i64 [ %10, %condTrue__2 ], [ 9223372036854775807, %condFalse__1 ] + %11 = icmp sge i64 %number, 0 + br i1 %11, label %condTrue__3, label %condContinue__3 + +condTrue__3: ; preds = %condContinue__2 + %12 = icmp sle i64 %number, %max + br label %condContinue__3 + +condContinue__3: ; preds = %condTrue__3, %condContinue__2 + %13 = phi i1 [ %12, %condTrue__3 ], [ %11, %condContinue__2 ] + %14 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([34 x i8], [34 x i8]* @20, i32 0, i32 0)) + %15 = call %String* @__quantum__rt__int_to_string(i64 %bits) + %16 = call %String* @__quantum__rt__string_concatenate(%String* %14, %String* %15) + call void @__quantum__rt__string_update_reference_count(%String* %14, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + %17 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @21, i32 0, i32 0)) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__int_to_string(i64 %number) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + %21 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @6, i32 0, i32 0)) + %22 = call %String* @__quantum__rt__string_concatenate(%String* %20, %String* %21) + call void @__quantum__rt__string_update_reference_count(%String* %20, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %21, i32 -1) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %13, %String* %22) + %23 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %bits) + %24 = sub i64 %bits, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %condContinue__3 + %25 = phi i64 [ 0, %condContinue__3 ], [ %29, %exiting__1 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__1, label %exit__1 + +body__1: ; preds = 
%header__1 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %25) + %28 = bitcast i8* %27 to i1* + store i1 false, i1* %28, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %29 = add i64 %25, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %23, %Array** %outputBits, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + store i64 %number, i64* %tempInt, align 4 + %30 = sub i64 %bits, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idxBit = phi i64 [ 0, %exit__1 ], [ %41, %exiting__2 ] + %31 = icmp sle i64 %idxBit, %30 + br i1 %31, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %32 = load %Array*, %Array** %outputBits, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = load i64, i64* %tempInt, align 4 + %35 = srem i64 %34, 2 + %36 = icmp eq i64 %35, 0 + %37 = select i1 %36, i1 false, i1 true + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxBit) + %39 = bitcast i8* %38 to i1* + store i1 %37, i1* %39, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %outputBits, align 8 + %40 = sdiv i64 %34, 2 + store i64 %40, i64* %tempInt, align 4 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %41 = add i64 %idxBit, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %42 = load %Array*, %Array** %outputBits, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %22, i32 -1) + ret %Array* %42 +} + +define internal void @Microsoft__Quantum__Canon___c2c9de63210e4d19860498010c1645f5_ApplyToEachCA__body(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call %Range @Microsoft__Quantum__Arrays___cf7bb862dc544cd083b9ebf7b65b7b76_IndexRange__body(%Array* %register) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %4 = icmp sgt i64 %2, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxQubit = phi i64 [ %1, %preheader__1 ], [ %14, %exiting__1 ] + %5 = icmp sle i64 %idxQubit, %3 + %6 = icmp sge i64 %idxQubit, %3 + %7 = select i1 %4, i1 %5, i1 %6 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Qubit* }* + %13 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %12, i32 0, i32 0 + store %Qubit* %10, %Qubit** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %singleElementOperation, %Tuple* %11, %Tuple* null) 
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %14 = add i64 %idxQubit, %2 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___c2c9de63210e4d19860498010c1645f5_ApplyToEachCA__adj(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call %Range @Microsoft__Quantum__Arrays___cf7bb862dc544cd083b9ebf7b65b7b76_IndexRange__body(%Array* %register) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + %4 = sub i64 %3, %1 + %5 = sdiv i64 %4, %2 + %6 = mul i64 %2, %5 + %7 = add i64 %1, %6 + %8 = sub i64 0, %2 + %9 = insertvalue %Range zeroinitializer, i64 %7, 0 + %10 = insertvalue %Range %9, i64 %8, 1 + %11 = insertvalue %Range %10, i64 %1, 2 + %12 = extractvalue %Range %11, 0 + %13 = extractvalue %Range %11, 1 + %14 = extractvalue %Range %11, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %15 = icmp sgt i64 %13, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %12, %preheader__1 ], [ %26, %exiting__1 ] + %16 = icmp sle i64 %__qsVar0__idxQubit__, %14 + %17 = icmp sge i64 %__qsVar0__idxQubit__, %14 + %18 = select i1 %15, i1 %16, i1 %17 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %19) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %21 = bitcast i8* %20 to %Qubit** + %22 = load %Qubit*, %Qubit** %21, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Qubit* }* + %25 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %24, i32 0, i32 0 + store %Qubit* %22, %Qubit** %25, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %19, %Tuple* %23, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %26 = add i64 %__qsVar0__idxQubit__, %13 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Canon___c2c9de63210e4d19860498010c1645f5_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %3 = call %Range @Microsoft__Quantum__Arrays___cf7bb862dc544cd083b9ebf7b65b7b76_IndexRange__body(%Array* %register) + %4 = extractvalue %Range %3, 0 + %5 = extractvalue %Range %3, 1 + %6 = extractvalue %Range %3, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %7 = icmp sgt i64 %5, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxQubit = phi i64 [ %4, %preheader__1 ], [ %19, %exiting__1 ] + %8 = icmp sle i64 %idxQubit, %6 + %9 = icmp sge i64 %idxQubit, %6 + %10 = select i1 %7, i1 %8, i1 %9 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, %Qubit* }* + %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %16, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %17, align 8 + store %Qubit* %14, %Qubit** %18, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %15, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %idxQubit, %5 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___c2c9de63210e4d19860498010c1645f5_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { 
%Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %3 = call %Range @Microsoft__Quantum__Arrays___cf7bb862dc544cd083b9ebf7b65b7b76_IndexRange__body(%Array* %register) + %4 = extractvalue %Range %3, 0 + %5 = extractvalue %Range %3, 1 + %6 = extractvalue %Range %3, 2 + %7 = sub i64 %6, %4 + %8 = sdiv i64 %7, %5 + %9 = mul i64 %5, %8 + %10 = add i64 %4, %9 + %11 = sub i64 0, %5 + %12 = insertvalue %Range zeroinitializer, i64 %10, 0 + %13 = insertvalue %Range %12, i64 %11, 1 + %14 = insertvalue %Range %13, i64 %4, 2 + %15 = extractvalue %Range %14, 0 + %16 = extractvalue %Range %14, 1 + %17 = extractvalue %Range %14, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %18 = icmp sgt i64 %16, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %15, %preheader__1 ], [ %30, %exiting__1 ] + %19 = icmp sle i64 %__qsVar0__idxQubit__, %17 + %20 = icmp sge i64 %__qsVar0__idxQubit__, %17 + %21 = select i1 %18, i1 %19, i1 %20 + br i1 %21, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %24 = bitcast i8* %23 to %Qubit** + %25 = load %Qubit*, %Qubit** %24, align 8 + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Array*, %Qubit* }* + %28 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %27, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %28, align 8 + store %Qubit* %25, %Qubit** %29, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %26, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %30 = add i64 %__qsVar0__idxQubit__, %16 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___d9c24574d9ed4a4aba478cabe8323707_DecomposedIntoTimeStepsCA__body({ i64, %Callable* }* %0, i64 %trotterOrder) { +entry: + %1 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 0 + %nSteps = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = icmp eq i64 %trotterOrder, 1 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Callable* }* getelementptr ({ %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, i64, %Callable* }* + %7 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %6, i32 0, i32 2 + store %Callable* %4, %Callable** %7, align 8 + store i64 %nSteps, i64* %8, align 4 + store %Callable* %op, %Callable** %9, align 8 + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__19__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__9__FunctionTable, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret %Callable* %10 + +test1__1: ; preds = %entry + %11 = icmp eq i64 %trotterOrder, 2 + br i1 %11, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Callable* }* getelementptr ({ %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Callable*, i64, %Callable* }* + %15 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %14, i32 0, i32 0 + %16 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, 
i64, %Callable* }* %14, i32 0, i32 1 + %17 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %14, i32 0, i32 2 + store %Callable* %12, %Callable** %15, align 8 + store i64 %nSteps, i64* %16, align 4 + store %Callable* %op, %Callable** %17, align 8 + %18 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__20__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__9__FunctionTable, %Tuple* %13) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret %Callable* %18 + +test2__1: ; preds = %test1__1 + %19 = srem i64 %trotterOrder, 2 + %20 = icmp eq i64 %19, 0 + br i1 %20, label %then2__1, label %else__1 + +then2__1: ; preds = %test2__1 + %21 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, i64, %Callable* }* getelementptr ({ %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* null, i32 1) to i64)) + %23 = bitcast %Tuple* %22 to { %Callable*, i64, i64, %Callable* }* + %24 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %23, i32 0, i32 0 + %25 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %23, i32 0, i32 1 + %26 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %23, i32 0, i32 2 + %27 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %23, i32 0, i32 3 + store %Callable* %21, %Callable** %24, align 8 + store i64 %trotterOrder, i64* %25, align 4 + store i64 %nSteps, i64* %26, align 4 + store %Callable* %op, %Callable** %27, align 8 + %28 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__21__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__10__FunctionTable, %Tuple* %22) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret %Callable* %28 + +else__1: ; preds = %test2__1 + %29 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @8, i32 0, i32 0)) + %30 = call %String* @__quantum__rt__int_to_string(i64 %trotterOrder) + %31 = call %String* @__quantum__rt__string_concatenate(%String* %29, %String* %30) + call void @__quantum__rt__string_update_reference_count(%String* %29, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %30, i32 -1) + %32 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([20 x i8], [20 x i8]* @9, i32 0, i32 0)) + %33 = call %String* @__quantum__rt__string_concatenate(%String* %31, %String* %32) + call void @__quantum__rt__string_update_reference_count(%String* %31, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %32, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) 
+ call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__fail(%String* %33) + unreachable + +continue__1: ; No predecessors! + unreachable +} + +define internal void @Lifted__PartialApplication__19__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Callable* }* + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + store i64 %2, i64* %7, align 4 + store %Callable* %4, %Callable** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { i64, %Callable* }*, double, %Array* }* + %16 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 2 + store { i64, %Callable* }* %6, { i64, %Callable* }** %16, align 8 + store double %11, double* %17, align 8 + store %Array* %13, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %14, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__19__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Callable* }* + %7 
= getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + store i64 %2, i64* %7, align 4 + store %Callable* %4, %Callable** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { i64, %Callable* }*, double, %Array* }* + %16 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 2 + store { i64, %Callable* }* %6, { i64, %Callable* }** %16, align 8 + store double %11, double* %17, align 8 + store %Array* %13, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %14, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__19__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable* }* + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, 
%Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + store i64 %7, i64* %12, align 4 + store %Callable* %9, %Callable** %13, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %15 = load double, double* %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { { i64, %Callable* }*, double, %Array* }* + %20 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 2 + store { i64, %Callable* }* %11, { i64, %Callable* }** %20, align 8 + store double %15, double* %21, align 8 + store %Array* %17, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { { i64, %Callable* }*, double, %Array* }* %19, { { i64, %Callable* }*, double, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__19__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, 
%Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable* }* + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + store i64 %7, i64* %12, align 4 + store %Callable* %9, %Callable** %13, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %15 = load double, double* %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { { i64, %Callable* }*, double, %Array* }* + %20 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 2 + store { i64, %Callable* }* %11, { i64, %Callable* }** %20, align 8 + store double %15, double* %21, align 8 + store %Array* %17, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { { i64, %Callable* }*, double, %Array* }* %19, { { i64, %Callable* }*, double, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %29) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* 
%18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____body({ i64, %Callable* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____adj({ i64, %Callable* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, double, %Array* }*, { { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctl(%Array* %3, { { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr 
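; [editor note — annotation, not part of the generated QIR]
; The four Trotter1ImplCA wrappers in this region (__body, __adj, __ctl,
; __ctladj) are thin trampolines: each bitcasts %arg-tuple to the concrete
; argument struct, loads the fields, and forwards them to the matching
; specialization. The controlled variants receive the control-qubit %Array*
; prepended to the inner argument tuple.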
inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, double, %Array* }*, { { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___89ed6ec91cfd47ecb5df95ff3e5db012___QsRef0__Trotter1ImplCA____ctladj(%Array* %3, { { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__9__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__9__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__20__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Callable* }* + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + store i64 %2, i64* %7, align 
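; [editor note — annotation, not part of the generated QIR]
; MemoryManagement__9__RefCount/__AliasCount above are the capture-tuple
; callbacks registered through the second function table passed to
; __quantum__rt__callable_create: they apply the signed %count-change to both
; captured callables (capture and callable counts) and to the capture tuple
; itself, so a single call retains or releases the whole closure.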
4 + store %Callable* %4, %Callable** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { i64, %Callable* }*, double, %Array* }* + %16 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 2 + store { i64, %Callable* }* %6, { i64, %Callable* }** %16, align 8 + store double %11, double* %17, align 8 + store %Array* %13, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %14, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__20__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Callable* }* + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + store i64 %2, i64* %7, align 4 + store %Callable* %4, %Callable** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { i64, %Callable* }*, double, %Array* }* + %16 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, 
i32 1 + %18 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %15, i32 0, i32 2 + store { i64, %Callable* }* %6, { i64, %Callable* }** %16, align 8 + store double %11, double* %17, align 8 + store %Array* %13, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %14, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__20__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable* }* + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + store i64 %7, i64* %12, align 4 + store %Callable* %9, %Callable** %13, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %15 = load double, double* %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { { i64, %Callable* }*, double, %Array* }* + %20 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 2 + store { i64, %Callable* }* %11, { 
i64, %Callable* }** %20, align 8 + store double %15, double* %21, align 8 + store %Array* %17, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { { i64, %Callable* }*, double, %Array* }* %19, { { i64, %Callable* }*, double, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__20__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i64, %Callable* }* + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + store i64 %7, i64* %12, align 4 + store %Callable* %9, %Callable** %13, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %15 = load double, double* %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %17 = load %Array*, %Array** %16, 
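; [editor note — annotation, not part of the generated QIR]
; The __ctl__ wrappers all share one shape: copy the wrapped callable, retain
; the copy, apply __quantum__rt__callable_make_controlled, build a
; { controls, inner-args } tuple, invoke, then drop the temporary tuples and
; both counts on the copy. The __ctladj__ variant below additionally applies
; __quantum__rt__callable_make_adjoint before making the copy controlled.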
align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, double, %Array* }* getelementptr ({ { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { { i64, %Callable* }*, double, %Array* }* + %20 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %19, i32 0, i32 2 + store { i64, %Callable* }* %11, { i64, %Callable* }** %20, align 8 + store double %15, double* %21, align 8 + store %Array* %17, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { { i64, %Callable* }*, double, %Array* }* %19, { { i64, %Callable* }*, double, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, i64, %Callable* }, { %Callable*, i64, %Callable* }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %29) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void 
@Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____body({ i64, %Callable* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { i64, %Callable* }*, double, %Array* }, { { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____adj({ i64, %Callable* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, double, %Array* }*, { { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctl(%Array* %3, { { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, double, %Array* }* }, { %Array*, { { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, double, %Array* }*, { { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___af3bf30b55d24ff6ba9fb42d1b36783f___QsRef0__Trotter2ImplCA____ctladj(%Array* %3, { { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @Lifted__PartialApplication__21__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, 
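; [editor note — annotation, not part of the generated QIR]
; PartialApplication__21 differs from __19/__20 only in its capture layout:
; { %Callable*, i64, i64, %Callable* }. Its wrappers pack the second captured
; i64 together with the captured callable into an { i64, %Callable* } pair,
; then prepend the first i64 to form the
; { i64, { i64, %Callable* }*, double, %Array* } argument tuple that the
; TrotterArbitraryImplCA wrappers further below appear to expect.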
%Callable* }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 3 + %6 = load %Callable*, %Callable** %5, align 8 + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { i64, %Callable* }* + %9 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %8, i32 0, i32 1 + store i64 %4, i64* %9, align 4 + store %Callable* %6, %Callable** %10, align 8 + %11 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %11, i32 0, i32 0 + %13 = load double, double* %12, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %11, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { i64, { i64, %Callable* }*, double, %Array* }* + %18 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 3 + store i64 %2, i64* %18, align 4 + store { i64, %Callable* }* %8, { i64, %Callable* }** %19, align 8 + store double %13, double* %20, align 8 + store %Array* %15, %Array** %21, align 8 + %22 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %16, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__21__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 3 + %6 = load %Callable*, %Callable** %5, align 8 + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { i64, %Callable* }* + %9 = getelementptr inbounds { i64, %Callable* }, { i64, 
%Callable* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %8, i32 0, i32 1 + store i64 %4, i64* %9, align 4 + store %Callable* %6, %Callable** %10, align 8 + %11 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %12 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %11, i32 0, i32 0 + %13 = load double, double* %12, align 8 + %14 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %11, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { i64, { i64, %Callable* }*, double, %Array* }* + %18 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %17, i32 0, i32 3 + store i64 %2, i64* %18, align 4 + store { i64, %Callable* }* %8, { i64, %Callable* }** %19, align 8 + store double %13, double* %20, align 8 + store %Array* %15, %Array** %21, align 8 + %22 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %16, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__21__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 3 + %11 = load 
%Callable*, %Callable** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, %Callable* }* + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1 + store i64 %9, i64* %14, align 4 + store %Callable* %11, %Callable** %15, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %17 = load double, double* %16, align 8 + %18 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %19 = load %Array*, %Array** %18, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { i64, { i64, %Callable* }*, double, %Array* }* + %22 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 3 + store i64 %7, i64* %22, align 4 + store { i64, %Callable* }* %13, { i64, %Callable* }** %23, align 8 + store double %17, double* %24, align 8 + store %Array* %19, %Array** %25, align 8 + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* + %28 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %27, i32 0, i32 1 + store %Array* %3, %Array** %28, align 8 + store { i64, { i64, %Callable* }*, double, %Array* }* %21, { i64, { i64, %Callable* }*, double, %Array* }** %29, align 8 + %30 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 0 + %31 = load %Callable*, %Callable** %30, align 8 + %32 = call %Callable* @__quantum__rt__callable_copy(%Callable* %31, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %32) + call void @__quantum__rt__callable_invoke(%Callable* %32, %Tuple* %26, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %32, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__21__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %6 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 1 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 3 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, %Callable* }* + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1 + store i64 %9, i64* %14, align 4 + store %Callable* %11, %Callable** %15, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %17 = load double, double* %16, align 8 + %18 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %19 = load %Array*, %Array** %18, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { i64, %Callable* }*, double, %Array* }* getelementptr ({ i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { i64, { i64, %Callable* }*, double, %Array* }* + %22 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %21, i32 0, i32 3 + store i64 %7, i64* %22, align 4 + store { i64, %Callable* }* %13, { i64, %Callable* }** %23, align 8 + store double %17, double* %24, align 8 + store %Array* %19, %Array** %25, align 8 + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* getelementptr ({ %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* + %28 = getelementptr inbounds { 
%Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %27, i32 0, i32 1 + store %Array* %3, %Array** %28, align 8 + store { i64, { i64, %Callable* }*, double, %Array* }* %21, { i64, { i64, %Callable* }*, double, %Array* }** %29, align 8 + %30 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %5, i32 0, i32 0 + %31 = load %Callable*, %Callable** %30, align 8 + %32 = call %Callable* @__quantum__rt__callable_copy(%Callable* %31, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %32) + call void @__quantum__rt__callable_make_controlled(%Callable* %32) + call void @__quantum__rt__callable_invoke(%Callable* %32, %Tuple* %26, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %32, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %7 = load double, double* %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____body(i64 %5, { i64, %Callable* }* %6, double %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, { i64, %Callable* }*, double, %Array* }* + %1 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, { i64, %Callable* }*, double, %Array* }, { i64, { i64, %Callable* }*, double, %Array* }* %0, 
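; [editor note — annotation, not part of the generated QIR]
; As with the Trotter1/Trotter2 cases, the TrotterArbitraryImplCA wrappers only
; unpack the argument tuple and forward it; all reference-count bookkeeping
; stays in the lifted partial-application wrappers that call them.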
i32 0, i32 3 + %5 = load i64, i64* %1, align 4 + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %7 = load double, double* %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____adj(i64 %5, { i64, %Callable* }* %6, double %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, { i64, %Callable* }*, double, %Array* }*, { i64, { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctl(%Array* %3, { i64, { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }, { %Array*, { i64, { i64, %Callable* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, { i64, %Callable* }*, double, %Array* }*, { i64, { i64, %Callable* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___c1e9353ee9584bf1976fca4503c1777e___QsRef0__TrotterArbitraryImplCA____ctladj(%Array* %3, { i64, { i64, %Callable* }*, double, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__10__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 3 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__10__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast 
%Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable* }, { %Callable*, i64, i64, %Callable* }* %0, i32 0, i32 3 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +declare %String* @__quantum__rt__int_to_string(i64) + +define internal %Callable* @Microsoft__Quantum__Canon___55c7b8d161af40c49ac844f8a0630208_BoundCA__body(%Array* %operations) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %9 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %15, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %10) + %13 = bitcast i8* %12 to %Callable** + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %15 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %operations, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Callable*, %Array* }* + %18 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 1 + store %Callable* %8, %Callable** %18, align 8 + store %Array* %operations, %Array** %19, align 8 + %20 = call %Callable* 
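; [editor note — annotation, not part of the generated QIR]
; ___BoundCA__body composes an array of adjointable+controllable operations:
; it raises alias counts on every element while the array is live, retains
; each element plus the array for the capture tuple, creates the ApplyBoundCA
; callable and a PartialApplication__22 closure over { %Callable*, %Array* },
; then releases the alias counts and returns the lifted %Callable*.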
@__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__22__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__11__FunctionTable, %Tuple* %16) + %21 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %22 = phi i64 [ 0, %exit__2 ], [ %27, %exiting__3 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %22) + %25 = bitcast i8* %24 to %Callable** + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %27 = add i64 %22, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + ret %Callable* %20 +} + +define internal void @Lifted__PartialApplication__22__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__22__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* 
@__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__22__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__22__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + 
%3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____body(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void 
@Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____adj(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____ctl(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___1e55230a3cc04ef0802aeb704e814540___QsRef0__ApplyBoundCA____ctladj(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__11__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Callable** + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__11__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { 
%Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Callable** + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %11, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation____QsRef0___AddGeneratorSystems____body(i64 %idxTerm, i64 %nTermsA, i64 %nTermsB, %Callable* %generatorIndexFunctionA, %Callable* %generatorIndexFunctionB) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 1) + %0 = icmp slt i64 %idxTerm, %nTermsA + br i1 %0, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { i64 }* + %3 = getelementptr inbounds { i64 }, { i64 }* %2, i32 0, i32 0 + store i64 %idxTerm, i64* %3, align 4 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %generatorIndexFunctionA, %Tuple* %1, %Tuple* %4) + %5 = bitcast %Tuple* %4 to { { { %Array*, %Array* }*, %Array* }* }* + %6 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 0 + %7 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %6, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %1, i32 -1) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %7 + +else__1: ; preds = %entry + %8 = sub i64 %idxTerm, %nTermsA + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { i64 }* + %11 = getelementptr inbounds { i64 }, { i64 }* %10, i32 0, i32 0 + store i64 %8, i64* %11, align 4 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %generatorIndexFunctionB, %Tuple* %9, %Tuple* %12) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %15 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %14, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %15 + +continue__1: ; No predecessors! + unreachable +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____body(double %trotterStepSize, i64 %trotterOrder, double %maxTime, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %1 = load { %Callable* }*, { %Callable* }** %0, align 8 + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + %3 = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 1) + %4 = bitcast { %Callable* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %5, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { i64, %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %11 = fdiv double %maxTime, %trotterStepSize + %nTimeSlices = call i64 
@Microsoft__Quantum__Math__Ceiling__body(double %11) + %12 = sitofp i64 %nTimeSlices to double + %resizedTrotterStepSize = fdiv double %maxTime, %12 + %13 = sub i64 %nTimeSlices, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxTimeSlice = phi i64 [ 0, %entry ], [ %19, %exiting__1 ] + %14 = icmp sle i64 %idxTimeSlice, %13 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %resizedTrotterStepSize) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Array* }* + %18 = getelementptr inbounds { %Array* }, { %Array* }* %17, i32 0, i32 0 + store %Array* %qubits, %Array** %18, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %16, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %idxTimeSlice, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal i64 @Microsoft__Quantum__Math__Ceiling__body(double %value) { +entry: + %0 = call { i64, double, i1 }* @Microsoft__Quantum__Math____QsRef1__ExtendedTruncation____body(double %value) + %1 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %0, i32 0, i32 0 + %truncated = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %0, i32 0, i32 1 + %remainder = load double, double* %2, align 8 + %3 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %0, i32 0, i32 2 + %isPositive = load i1, i1* %3, align 1 + %4 = call double @Microsoft__Quantum__Math__AbsD__body(double %remainder) + %5 = fcmp ole double %4, 1.000000e-15 + br i1 %5, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %6 = bitcast { i64, double, i1 }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret i64 %truncated + +else__1: ; preds = %entry + br i1 %isPositive, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %7 = add i64 %truncated, 1 + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %8 = phi i64 [ %7, %condTrue__1 ], [ %truncated, %condFalse__1 ] + %9 = bitcast { i64, double, i1 }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret i64 %8 + +continue__1: ; No predecessors! 
+ unreachable +} + +define internal %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %trotterStepSize) { +entry: + %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %evolutionSet = load { %Callable* }*, { %Callable* }** %0, align 8 + %1 = getelementptr inbounds { %Callable* }, { %Callable* }* %evolutionSet, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + %3 = bitcast { %Callable* }* %evolutionSet to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %generatorSystem = load { i64, %Callable* }*, { i64, %Callable* }** %4, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %generatorSystemFunction = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + %6 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %8, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* getelementptr 
({ %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %12 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %11, i32 0, i32 1 + store %Callable* %9, %Callable** %12, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, { { %Callable* }*, { i64, %Callable* }* }** %13, align 8 + %14 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__25__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__14__FunctionTable, %Tuple* %10) + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %trotterForm = bitcast %Tuple* %15 to { i64, %Callable* }* + %16 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %trotterForm, i32 0, i32 0 + %17 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %trotterForm, i32 0, i32 1 + store i64 %nTerms, i64* %16, align 4 + store %Callable* %14, %Callable** %17, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %14, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %18 = call %Callable* @Microsoft__Quantum__Canon___d9c24574d9ed4a4aba478cabe8323707_DecomposedIntoTimeStepsCA__body({ i64, %Callable* }* %trotterForm, i64 %trotterOrder) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { %Callable*, double }* + %21 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %20, i32 0, i32 1 + store %Callable* %18, %Callable** %21, align 8 + store double %trotterStepSize, double* %22, align 8 + %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__26__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__15__FunctionTable, %Tuple* %19) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + ret %Callable* %23 +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____adj(double %trotterStepSize, i64 %trotterOrder, double %maxTime, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %1 = load { %Callable* }*, { %Callable* }** %0, align 8 + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + %3 = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 1) + %4 = bitcast { %Callable* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %5, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { i64, %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %11 = fdiv double %maxTime, %trotterStepSize + %__qsVar0__nTimeSlices__ = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %11) + %12 = sitofp i64 %__qsVar0__nTimeSlices__ to double + %__qsVar1__resizedTrotterStepSize__ = fdiv double %maxTime, %12 + %13 = sub i64 %__qsVar0__nTimeSlices__, 1 + %14 = sub i64 %13, 0 + %15 = sdiv i64 %14, 1 + %16 = mul i64 1, %15 + %17 = add i64 0, %16 + %18 = insertvalue %Range zeroinitializer, i64 %17, 0 + %19 = insertvalue %Range %18, i64 -1, 1 + %20 = insertvalue %Range %19, i64 0, 2 + %21 = extractvalue %Range %20, 0 + %22 = extractvalue %Range %20, 1 + %23 = extractvalue %Range %20, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %24 = icmp sgt i64 %22, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar2__idxTimeSlice__ = phi i64 [ %21, %preheader__1 ], [ %33, %exiting__1 ] + %25 = icmp sle i64 %__qsVar2__idxTimeSlice__, %23 + %26 = icmp sge i64 
%__qsVar2__idxTimeSlice__, %23 + %27 = select i1 %24, i1 %25, i1 %26 + br i1 %27, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %28 = call %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %__qsVar1__resizedTrotterStepSize__) + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %29) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %31 = bitcast %Tuple* %30 to { %Array* }* + %32 = getelementptr inbounds { %Array* }, { %Array* }* %31, i32 0, i32 0 + store %Array* %qubits, %Array** %32, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %30, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %28, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %33 = add i64 %__qsVar2__idxTimeSlice__, %22 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____ctl(%Array* %__controlQubits__, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 0 + %trotterStepSize = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 1 + %trotterOrder = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 2 + %maxTime = load double, double* %3, align 8 + %4 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 3 + %evolutionGenerator = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** 
%4, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 1) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 4 + %qubits = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %17 = fdiv double %maxTime, %trotterStepSize + %nTimeSlices = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %17) + %18 = sitofp i64 %nTimeSlices to double + %resizedTrotterStepSize = fdiv double %maxTime, %18 + %19 = sub i64 %nTimeSlices, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxTimeSlice = phi i64 [ 0, %entry ], [ %27, %exiting__1 ] + %20 = icmp sle i64 %idxTimeSlice, %19 + br i1 %20, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %21 = call %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %resizedTrotterStepSize) + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %21, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, %Array* }* + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %25, align 8 + store %Array* %qubits, %Array** %26, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %23, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %27 = add i64 %idxTimeSlice, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____ctladj(%Array* %__controlQubits__, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 0 + %trotterStepSize = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 1 + %trotterOrder = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 2 + %maxTime = load double, double* %3, align 8 + %4 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 3 + %evolutionGenerator = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %4, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { 
i64, %Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 1) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 4 + %qubits = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %17 = fdiv double %maxTime, %trotterStepSize + %__qsVar0__nTimeSlices__ = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %17) + %18 = sitofp i64 %__qsVar0__nTimeSlices__ to double + %__qsVar1__resizedTrotterStepSize__ = fdiv double %maxTime, %18 + %19 = sub i64 %__qsVar0__nTimeSlices__, 1 + %20 = sub i64 %19, 0 + %21 = sdiv i64 %20, 1 + %22 = mul i64 1, %21 + %23 = add i64 0, %22 + %24 = insertvalue %Range zeroinitializer, i64 %23, 0 + %25 = insertvalue %Range %24, i64 -1, 1 + %26 = insertvalue %Range %25, i64 0, 2 + %27 = extractvalue %Range %26, 0 + %28 = extractvalue %Range %26, 1 + %29 = extractvalue %Range %26, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %30 = icmp sgt i64 %28, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar2__idxTimeSlice__ = phi i64 [ %27, %preheader__1 ], [ %40, %exiting__1 ] + %31 = icmp sle i64 %__qsVar2__idxTimeSlice__, %29 + %32 = icmp sge i64 %__qsVar2__idxTimeSlice__, %29 + %33 = select i1 %30, i1 %31, i1 %32 + br i1 %33, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %34 = call %Callable* @Microsoft__Quantum__Simulation__TrotterStep__body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %trotterOrder, double %__qsVar1__resizedTrotterStepSize__) + %35 = call %Callable* @__quantum__rt__callable_copy(%Callable* %34, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %35, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %35) + call void @__quantum__rt__callable_make_controlled(%Callable* %35) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { %Array*, %Array* }* + %38 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %37, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %38, align 8 + store %Array* %qubits, %Array** %39, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %35, %Tuple* %36, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* 
%35, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %40 = add i64 %__qsVar2__idxTimeSlice__, %28 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____body({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %idx, double %stepsize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %evolutionSet = load { %Callable* }*, { %Callable* }** %0, align 8 + %1 = getelementptr inbounds { %Callable* }, { %Callable* }* %evolutionSet, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + %3 = bitcast { %Callable* }* %evolutionSet to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %generatorSystem = load { i64, %Callable* }*, { i64, %Callable* }** %4, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %generatorSystemFunction = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + %6 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + 
%8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %8, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { i64 }* + %11 = getelementptr inbounds { i64 }, { i64 }* %10, i32 0, i32 0 + store i64 %idx, i64* %11, align 4 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %generatorSystemFunction, %Tuple* %9, %Tuple* %12) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %14, align 8 + %15 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %16 = load { %Array*, %Array* }*, { %Array*, %Array* }** %15, align 8 + %17 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %16, i32 0, i32 0 + %18 = load %Array*, %Array** %17, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %16, i32 0, i32 1 + %20 = load %Array*, %Array** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 1) + %21 = bitcast { %Array*, %Array* }* %16 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 1) + %22 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %24 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 1) + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }* }* getelementptr ({ { %Callable* }* }, { { %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %2, %Tuple* %24, %Tuple* %25) + %26 = bitcast %Tuple* %25 to { { %Callable* }* }* + %27 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %26, i32 0, i32 0 + %28 = load { %Callable* }*, { %Callable* }** %27, align 8 + %29 = getelementptr inbounds { %Callable* }, { %Callable* }* %28, i32 0, i32 0 + %30 = load %Callable*, %Callable** %29, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %31 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %32 = bitcast %Tuple* %31 to { double, %Array* }* + %33 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %32, i32 0, i32 0 + %34 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %32, i32 0, i32 1 + store double %stepsize, double* %33, align 8 + store %Array* %qubits, %Array** %34, 
align 8 + call void @__quantum__rt__callable_invoke(%Callable* %30, %Tuple* %31, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 -1) + %35 = bitcast { %Callable* }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____adj({ { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i64 %idx, double %stepsize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %__qsVar0__evolutionSet__ = load { %Callable* }*, { %Callable* }** %0, align 8 + %1 = getelementptr inbounds { %Callable* }, { %Callable* }* %__qsVar0__evolutionSet__, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + %3 = bitcast { %Callable* }* %__qsVar0__evolutionSet__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + %4 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %__qsVar1__generatorSystem__ = load { i64, %Callable* }*, { i64, %Callable* }** %4, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__qsVar1__generatorSystem__, i32 0, i32 1 + %__qsVar3__generatorSystemFunction__ = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + %6 = bitcast { i64, %Callable* }* %__qsVar1__generatorSystem__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__qsVar1__generatorSystem__, i32 0, i32 0 + %__qsVar2__nTerms__ = load i64, i64* %8, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { i64 }* + %11 = getelementptr inbounds { i64 }, { i64 }* %10, i32 0, i32 0 + store i64 %idx, i64* %11, align 4 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %__qsVar3__generatorSystemFunction__, %Tuple* %9, %Tuple* %12) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %13, i32 0, i32 0 + %__qsVar4__generatorIndex__ = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %14, align 8 + %15 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__, i32 0, i32 0 + %16 = load { %Array*, %Array* }*, { %Array*, %Array* }** %15, align 8 + %17 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %16, i32 0, i32 0 + %18 = load %Array*, %Array** %17, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %16, i32 0, i32 1 + %20 = load %Array*, %Array** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 1) + %21 = bitcast { %Array*, %Array* }* %16 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 1) + %22 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__, i32 0, i32 1 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %24 = bitcast { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 1) + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }* }* getelementptr ({ { %Callable* }* }, { { %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %2, %Tuple* %24, %Tuple* %25) + %26 = bitcast %Tuple* %25 to { { %Callable* }* }* + %27 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %26, i32 0, i32 0 + %28 = load { %Callable* }*, { %Callable* }** %27, align 8 + %29 = getelementptr inbounds { %Callable* }, { %Callable* }* %28, i32 0, i32 0 + %30 = load %Callable*, %Callable** %29, align 8 + %31 = call %Callable* @__quantum__rt__callable_copy(%Callable* %30, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %31) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { double, %Array* }* + %34 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %33, i32 0, i32 1 + store double %stepsize, double* %34, align 8 + store %Array* %qubits, %Array** %35, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %31, %Tuple* %32, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 -1) + %36 = bitcast { %Callable* }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____ctl(%Array* %__controlQubits__, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 0 + %evolutionGenerator = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %2 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %evolutionSet = load { %Callable* }*, { %Callable* }** %2, align 8 + %3 = getelementptr inbounds { %Callable* }, { %Callable* }* %evolutionSet, i32 0, i32 0 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + %5 = bitcast { %Callable* }* %evolutionSet to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %generatorSystem = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %generatorSystemFunction = load %Callable*, %Callable** %7, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + %8 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 1 + %idx = load i64, i64* %10, align 4 + %11 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 2 + %stepsize = load double, double* %11, align 8 + %12 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 3 + %qubits = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %13, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64 }* + %16 = getelementptr inbounds { i64 }, { i64 }* %15, i32 0, i32 0 + store i64 %idx, i64* %16, align 4 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %generatorSystemFunction, %Tuple* %14, %Tuple* %17) + %18 = bitcast %Tuple* %17 to { { { %Array*, %Array* }*, %Array* }* }* + %19 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %18, i32 0, i32 0 + %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %19, align 8 + %20 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %21 = load { %Array*, %Array* }*, { %Array*, %Array* }** %20, align 8 + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 0 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %24 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 1 + %25 = load %Array*, %Array** 
%24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 1) + %26 = bitcast { %Array*, %Array* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 1) + %27 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }* }* getelementptr ({ { %Callable* }* }, { { %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %29, %Tuple* %30) + %31 = bitcast %Tuple* %30 to { { %Callable* }* }* + %32 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %31, i32 0, i32 0 + %33 = load { %Callable* }*, { %Callable* }** %32, align 8 + %34 = getelementptr inbounds { %Callable* }, { %Callable* }* %33, i32 0, i32 0 + %35 = load %Callable*, %Callable** %34, align 8 + %36 = call %Callable* @__quantum__rt__callable_copy(%Callable* %35, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %36) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %37 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %38 = bitcast %Tuple* %37 to { double, %Array* }* + %39 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %38, i32 0, i32 0 + %40 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %38, i32 0, i32 1 + store double %stepsize, double* %39, align 8 + store %Array* %qubits, %Array** %40, align 8 + %41 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %42 = bitcast %Tuple* %41 to { %Array*, { double, %Array* }* }* + %43 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %42, i32 0, i32 0 + %44 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %42, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %43, align 8 + store { double, %Array* }* %38, { double, %Array* }** %44, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %36, %Tuple* %41, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, 
i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorSystemFunction, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %35, i32 -1) + %45 = bitcast { %Callable* }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____ctladj(%Array* %__controlQubits__, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 0 + %evolutionGenerator = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %2 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %__qsVar0__evolutionSet__ = load { %Callable* }*, { %Callable* }** %2, align 8 + %3 = getelementptr inbounds { %Callable* }, { %Callable* }* %__qsVar0__evolutionSet__, i32 0, i32 0 + %4 = load %Callable*, %Callable** %3, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + %5 = bitcast { %Callable* }* %__qsVar0__evolutionSet__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %__qsVar1__generatorSystem__ = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__qsVar1__generatorSystem__, i32 0, i32 1 + %__qsVar3__generatorSystemFunction__ = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + %8 = bitcast { i64, %Callable* }* %__qsVar1__generatorSystem__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 1 + %idx = load i64, i64* %10, align 4 + %11 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 2 + %stepsize = load double, double* %11, align 8 + %12 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 3 + %qubits = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__qsVar1__generatorSystem__, i32 0, i32 0 + %__qsVar2__nTerms__ = load i64, i64* %13, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i64 }* + %16 = getelementptr inbounds { i64 }, { i64 }* %15, i32 0, i32 0 + store i64 %idx, i64* %16, align 4 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %__qsVar3__generatorSystemFunction__, %Tuple* %14, %Tuple* 
%17) + %18 = bitcast %Tuple* %17 to { { { %Array*, %Array* }*, %Array* }* }* + %19 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %18, i32 0, i32 0 + %__qsVar4__generatorIndex__ = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %19, align 8 + %20 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__, i32 0, i32 0 + %21 = load { %Array*, %Array* }*, { %Array*, %Array* }** %20, align 8 + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 0 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %24 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 1 + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 1) + %26 = bitcast { %Array*, %Array* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 1) + %27 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__, i32 0, i32 1 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %29 = bitcast { { %Array*, %Array* }*, %Array* }* %__qsVar4__generatorIndex__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }* }* getelementptr ({ { %Callable* }* }, { { %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %29, %Tuple* %30) + %31 = bitcast %Tuple* %30 to { { %Callable* }* }* + %32 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %31, i32 0, i32 0 + %33 = load { %Callable* }*, { %Callable* }** %32, align 8 + %34 = getelementptr inbounds { %Callable* }, { %Callable* }* %33, i32 0, i32 0 + %35 = load %Callable*, %Callable** %34, align 8 + %36 = call %Callable* @__quantum__rt__callable_copy(%Callable* %35, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %36) + call void @__quantum__rt__callable_make_controlled(%Callable* %36) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %37 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %38 = bitcast %Tuple* %37 to { double, %Array* }* + %39 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %38, i32 0, i32 0 + %40 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %38, i32 0, i32 1 + store double %stepsize, double* %39, align 8 + store %Array* %qubits, %Array** %40, align 8 + %41 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %42 = bitcast %Tuple* %41 to { %Array*, { double, %Array* }* }* + %43 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %42, i32 0, i32 0 + %44 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* 
}* %42, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %43, align 8 + store { double, %Array* }* %38, { double, %Array* }** %44, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %36, %Tuple* %41, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar3__generatorSystemFunction__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %35, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %35, i32 -1) + %45 = bitcast { %Callable* }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 -1) + ret void +} 
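+ +; Generator-system helpers follow. AddGeneratorSystems merges two generator +; systems by summing their term counts (%nTermsA + %nTermsB) and partially +; applying a combined index function over the two source systems' index functions.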
+ +define internal { i64, %Callable* }* @Microsoft__Quantum__Simulation__AddGeneratorSystems__body({ i64, %Callable* }* %generatorSystemA, { i64, %Callable* }* %generatorSystemB) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystemA, i32 0, i32 1 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { i64, %Callable* }* %generatorSystemA to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystemB, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + %5 = bitcast { i64, %Callable* }* %generatorSystemB to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %nTermsA = call i64 @Microsoft__Quantum__Simulation__GetGeneratorSystemNTerms__body({ i64, %Callable* }* %generatorSystemA) + %nTermsB = call i64 @Microsoft__Quantum__Simulation__GetGeneratorSystemNTerms__body({ i64, %Callable* }* %generatorSystemB) + %generatorIndexFunctionA = call %Callable* @Microsoft__Quantum__Simulation__GetGeneratorSystemFunction__body({ i64, %Callable* }* %generatorSystemA) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 1) + %generatorIndexFunctionB = call %Callable* @Microsoft__Quantum__Simulation__GetGeneratorSystemFunction__body({ i64, %Callable* }* %generatorSystemB) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 1) + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation____QsRef0___AddGeneratorSystems____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunctionA, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunctionB, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunctionB, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, i64, %Callable*, %Callable* }* getelementptr ({ %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Callable*, i64, i64, %Callable*, %Callable* }* + %9 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %8, i32 0, i32 2 + %12 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, 
%Callable*, %Callable* }* %8, i32 0, i32 3 + %13 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %8, i32 0, i32 4 + store %Callable* %6, %Callable** %9, align 8 + store i64 %nTermsA, i64* %10, align 4 + store i64 %nTermsB, i64* %11, align 4 + store %Callable* %generatorIndexFunctionA, %Callable** %12, align 8 + store %Callable* %generatorIndexFunctionB, %Callable** %13, align 8 + %generatorIndexFunction = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__23__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__12__FunctionTable, %Tuple* %7) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + %14 = add i64 %nTermsA, %nTermsB + %15 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 %14, %Callable* %generatorIndexFunction) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunctionA, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunctionB, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunction, i32 -1) + ret { i64, %Callable* }* %15 +} + +define internal i64 @Microsoft__Quantum__Simulation__GetGeneratorSystemNTerms__body({ i64, %Callable* }* %generatorSystem) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %generatorIndexFunction = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + %1 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %2, 
align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + ret i64 %nTerms +} + +define internal %Callable* @Microsoft__Quantum__Simulation__GetGeneratorSystemFunction__body({ i64, %Callable* }* %generatorSystem) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 1 + %generatorIndexFunction = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + %1 = bitcast { i64, %Callable* }* %generatorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %generatorSystem, i32 0, i32 0 + %nTerms = load i64, i64* %2, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %generatorIndexFunction, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %generatorIndexFunction, i32 -1) + ret %Callable* %generatorIndexFunction +} + +define internal void @Lifted__PartialApplication__23__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64 }* + %1 = getelementptr inbounds { i64 }, { i64 }* %0, i32 0, i32 0 + %2 = load i64, i64* %1, align 4 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable*, %Callable* }* + %4 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 1 + %5 = load i64, i64* %4, align 4 + %6 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 2 + %7 = load i64, i64* %6, align 4 + %8 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 3 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 4 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = 
call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64, i64, %Callable*, %Callable* }* getelementptr ({ i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, i64, i64, %Callable*, %Callable* }* + %14 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %13, i32 0, i32 4 + store i64 %2, i64* %14, align 4 + store i64 %5, i64* %15, align 4 + store i64 %7, i64* %16, align 4 + store %Callable* %9, %Callable** %17, align 8 + store %Callable* %11, %Callable** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %3, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0___AddGeneratorSystems____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, i64, i64, %Callable*, %Callable* }* + %1 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { i64, i64, i64, %Callable*, %Callable* }, { i64, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 4 + %6 = load i64, i64* %1, align 4 + %7 = load i64, i64* %2, align 4 + %8 = load i64, i64* %3, align 4 + %9 = load %Callable*, %Callable** %4, align 8 + %10 = load %Callable*, %Callable** %5, align 8 + %11 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation____QsRef0___AddGeneratorSystems____body(i64 %6, i64 %7, i64 %8, %Callable* %9, %Callable* %10) + %12 = bitcast %Tuple* %result-tuple to { { { %Array*, %Array* }*, %Array* }* }* + %13 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %12, i32 0, i32 0 + store { { %Array*, %Array* }*, %Array* }* %11, { { %Array*, %Array* }*, %Array* }** %13, align 8 + ret void +} + +define internal void @MemoryManagement__12__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, 
%Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 3 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 4 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__12__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, i64, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 3 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, i64, i64, %Callable*, %Callable* }, { %Callable*, i64, i64, %Callable*, %Callable* }* %0, i32 0, i32 4 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 %__Item1__, %Callable* %__Item2__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item2__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item2__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { i64, %Callable* }* + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %1, i32 0, i32 1 + store i64 %__Item1__, i64* %2, align 4 + store %Callable* %__Item2__, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item2__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item2__, i32 1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %__Item2__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item2__, i32 -1) + ret { i64, %Callable* }* %1 +} + +define internal { { %Callable* }*, { i64, %Callable* }* }* @Microsoft__Quantum__Simulation__EvolutionGenerator__body({ %Callable* }* %__Item1__, { i64, %Callable* }* %__Item2__) { +entry: + %0 = getelementptr inbounds { %Callable* }, { %Callable* }* %__Item1__, i32 0, i32 0 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { %Callable* }* %__Item1__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__Item2__, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 1) + %5 = bitcast { i64, %Callable* }* %__Item2__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Callable* }*, { i64, %Callable* }* }* getelementptr ({ { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { %Callable* }*, { i64, %Callable* }* }* + %8 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %7, i32 0, i32 1 + store { %Callable* }* %__Item1__, { %Callable* }** %8, align 8 + store { i64, %Callable* }* %__Item2__, { i64, %Callable* }** %9, align 8 + %10 = getelementptr inbounds { %Callable* }, { %Callable* }* %__Item1__, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %__Item2__, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 1) + %14 = bitcast { %Callable* }* %__Item1__ to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 1) + %15 = bitcast { i64, %Callable* }* %__Item2__ to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + ret { { %Callable* }*, { i64, %Callable* }* }* %7 +} + +define internal { %Callable* }* @Microsoft__Quantum__Simulation__EvolutionSet__body(%Callable* %__Item1__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable* }* + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + store %Callable* %__Item1__, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 -1) + ret { %Callable* }* %1 +} + +define internal { %Callable* }* @Microsoft__Quantum__Simulation__EvolutionUnitary__body(%Callable* %__Item1__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable* }* + %2 = getelementptr inbounds { %Callable* }, { %Callable* }* %1, i32 0, i32 0 + store %Callable* %__Item1__, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 -1) + ret { %Callable* }* %1 +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %0, %Array* %__Item3__) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__Item3__, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, %Array* }*, %Array* }* getelementptr ({ { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { { %Array*, %Array* }*, %Array* }* + %3 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %2, i32 0, i32 1 + store { %Array*, %Array* }* %0, { %Array*, %Array* }** %3, align 8 + store %Array* %__Item3__, %Array** %4, align 8 + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 1) + %9 = bitcast { %Array*, %Array* }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__Item3__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__Item3__, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %2 +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__body(i64 %idxTerm) { 
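+; Constructs the identity generator term: index array [0], coefficient array [0.0], qubit index array [0].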
+entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to i64* + store i64 0, i64* %2, align 4 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to double* + store double 0.000000e+00, double* %5, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %0, %Array** %8, align 8 + store %Array* %3, %Array** %9, align 8 + %10 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 0) + %12 = bitcast i8* %11 to i64* + store i64 0, i64* %12, align 4 + %13 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %7, %Array* %10) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %13 +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Simulation__IdentityGeneratorSystem__body() { +entry: + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 0, %Callable* %0) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + ret { i64, %Callable* }* %1 +} + +define internal void @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64 }* + %1 = getelementptr inbounds { i64 }, { i64 }* %0, i32 0, i32 0 + %2 = load i64, i64* %1, align 4 + %3 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__IdentityGeneratorIndex__body(i64 %2) + %4 = bitcast %Tuple* %result-tuple to { { { %Array*, %Array* }*, %Array* }* }* + %5 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %4, i32 0, i32 0 + store { { %Array*, %Array* }*, %Array* }* %3, { { %Array*, %Array* }*, %Array* }** %5, align 8 + ret void +} + +define internal { %Callable* }* @Microsoft__Quantum__Simulation__SimulationAlgorithm__body(%Callable* %__Item1__) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Callable* }* + %2 = getelementptr inbounds { 
%Callable* }, { %Callable* }* %1, i32 0, i32 0 + store %Callable* %__Item1__, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__Item1__, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__Item1__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__Item1__, i32 -1) + ret { %Callable* }* %1 +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Simulation__SumGeneratorSystems__body(%Array* %generatorSystems) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %generatorSystems) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %generatorSystems, i64 %2) + %5 = bitcast i8* %4 to { i64, %Callable* }** + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %5, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { i64, %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %generatorSystems, i32 1) + %11 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation__AddGeneratorSystems__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %12 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__IdentityGeneratorSystem__body() + %13 = call { i64, %Callable* }* @Microsoft__Quantum__Arrays___2d898dd22e254b94929370686c0145ed_Fold__body(%Callable* %11, { i64, %Callable* }* %12, %Array* %generatorSystems) + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 1 + %15 = load %Callable*, %Callable** %14, align 8 + %16 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %17 = phi i64 [ 0, %exit__1 ], [ %25, %exiting__2 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %generatorSystems, i64 %17) + %20 = bitcast i8* %19 to { i64, %Callable* }** + %21 = load { i64, %Callable* }*, { i64, %Callable* }** %20, align 8 + %22 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %21, i32 0, i32 1 + %23 = load %Callable*, %Callable** %22, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %23, i32 -1) + %24 = bitcast { i64, %Callable* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %25 = add i64 %17, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %generatorSystems, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + %26 = bitcast { i64, %Callable* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + ret { i64, %Callable* }* %13 +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Arrays___2d898dd22e254b94929370686c0145ed_Fold__body(%Callable* %folder, { i64, %Callable* }* %state, %Array* %array) { +entry: + %current = alloca { i64, %Callable* }*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %folder, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %folder, i32 1) + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %state, i32 0, i32 1 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { i64, %Callable* }* %state to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %5) + %8 = bitcast i8* %7 to { i64, %Callable* }** + %9 = load { i64, %Callable* }*, { i64, %Callable* }** %8, align 8 + %10 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %9, i32 0, i32 1 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %11, i32 1) + %12 = bitcast { i64, %Callable* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + store { i64, %Callable* }* %state, { i64, %Callable* }** %current, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %14 = call %Range @Microsoft__Quantum__Arrays___ab9454a18cf34e7dab26076c15ee491d_IndexRange__body(%Array* %array) + %15 = extractvalue %Range %14, 0 + %16 = extractvalue %Range %14, 1 + %17 = extractvalue %Range %14, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %18 = icmp sgt i64 %16, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idxElement = phi i64 [ %15, %preheader__1 ], [ %43, %exiting__2 ] + %19 = icmp sle i64 %idxElement, %17 + %20 = icmp sge i64 %idxElement, %17 + %21 = select i1 %18, i1 %19, i1 %20 
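+  ; The select on %18 (step > 0) makes %21 the loop-continue test for both ascending and descending ranges.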
+ br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = load { i64, %Callable* }*, { i64, %Callable* }** %current, align 8 + %23 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %22, i32 0, i32 1 + %24 = load %Callable*, %Callable** %23, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 1) + %25 = bitcast { i64, %Callable* }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 1) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idxElement) + %27 = bitcast i8* %26 to { i64, %Callable* }** + %28 = load { i64, %Callable* }*, { i64, %Callable* }** %27, align 8 + %29 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %28, i32 0, i32 1 + %30 = load %Callable*, %Callable** %29, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 1) + %31 = bitcast { i64, %Callable* }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, { i64, %Callable* }* }* getelementptr ({ { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, %Callable* }*, { i64, %Callable* }* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { { i64, %Callable* }*, { i64, %Callable* }* }* + %34 = getelementptr inbounds { { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, %Callable* }*, { i64, %Callable* }* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, %Callable* }*, { i64, %Callable* }* }* %33, i32 0, i32 1 + store { i64, %Callable* }* %22, { i64, %Callable* }** %34, align 8 + store { i64, %Callable* }* %28, { i64, %Callable* }** %35, align 8 + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }* }* getelementptr ({ { i64, %Callable* }* }, { { i64, %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %folder, %Tuple* %32, %Tuple* %36) + %37 = bitcast %Tuple* %36 to { { i64, %Callable* }* }* + %38 = getelementptr inbounds { { i64, %Callable* }* }, { { i64, %Callable* }* }* %37, i32 0, i32 0 + %39 = load { i64, %Callable* }*, { i64, %Callable* }** %38, align 8 + %40 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %39, i32 0, i32 1 + %41 = load %Callable*, %Callable** %40, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %41, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %41, i32 1) + %42 = bitcast { i64, %Callable* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %41, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %41, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %24, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + store { i64, %Callable* }* %39, { i64, %Callable* }** %current, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %41, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %41, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %43 = add i64 %idxElement, %16 + br label %header__2 + +exit__2: ; preds = %header__2 + %44 = load { i64, %Callable* }*, { i64, %Callable* }** %current, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %folder, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %folder, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + %45 = sub i64 %3, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %46 = phi i64 [ 0, %exit__2 ], [ %54, %exiting__3 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %46) + %49 = bitcast i8* %48 to { i64, %Callable* }** + %50 = load { i64, %Callable* }*, { i64, %Callable* }** %49, align 8 + %51 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %50, i32 0, i32 1 + %52 = load %Callable*, %Callable** %51, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %52, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %52, i32 -1) + %53 = bitcast { i64, %Callable* }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %53, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %54 = add i64 %46, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + %55 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %44, i32 0, i32 1 + %56 = load %Callable*, %Callable** %55, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %56, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %56, i32 -1) + %57 = bitcast { i64, %Callable* }* %44 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + ret { i64, %Callable* }* %44 +} + +define internal void @Microsoft__Quantum__Simulation__AddGeneratorSystems__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, 
%Callable* }*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, { i64, %Callable* }* }, { { i64, %Callable* }*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %3 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %5 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__AddGeneratorSystems__body({ i64, %Callable* }* %3, { i64, %Callable* }* %4) + %6 = bitcast %Tuple* %result-tuple to { { i64, %Callable* }* }* + %7 = getelementptr inbounds { { i64, %Callable* }* }, { { i64, %Callable* }* }* %6, i32 0, i32 0 + store { i64, %Callable* }* %5, { i64, %Callable* }** %7, align 8 + ret void +} + +define internal { %Callable* }* @Microsoft__Quantum__Simulation__TrotterSimulationAlgorithm__body(double %trotterStepSize, i64 %trotterOrder) { +entry: + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, i64 }* getelementptr ({ %Callable*, double, i64 }, { %Callable*, double, i64 }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, double, i64 }* + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %2, i32 0, i32 2 + store %Callable* %0, %Callable** %3, align 8 + store double %trotterStepSize, double* %4, align 8 + store i64 %trotterOrder, i64* %5, align 4 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__24__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__13__FunctionTable, %Tuple* %1) + %7 = call { %Callable* }* @Microsoft__Quantum__Simulation__SimulationAlgorithm__body(%Callable* %6) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + ret { %Callable* }* %7 +} + +define internal void @Lifted__PartialApplication__24__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = bitcast %Tuple* %arg-tuple to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %6 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 0 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 1 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %8, align 8 + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { 
%Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 2 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %14 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store i64 %4, i64* %15, align 4 + store double %7, double* %16, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %9, { { %Callable* }*, { i64, %Callable* }* }** %17, align 8 + store %Array* %11, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__24__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = bitcast %Tuple* %arg-tuple to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %6 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 0 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 1 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %8, align 8 + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %5, i32 0, i32 2 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %14 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store i64 %4, i64* %15, align 4 + store double %7, double* %16, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %9, { { %Callable* }*, { i64, %Callable* }* }** %17, align 8 + store %Array* %11, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__24__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %6 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = 
getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 1 + %13 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %12, align 8 + %14 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 2 + %15 = load %Array*, %Array** %14, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %18 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 3 + %22 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 4 + store double %7, double* %18, align 8 + store i64 %9, i64* %19, align 4 + store double %11, double* %20, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %13, { { %Callable* }*, { i64, %Callable* }* }** %21, align 8 + store %Array* %15, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* getelementptr ({ %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* 
%24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__24__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %6 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 0 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 1 + %13 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %12, align 8 + %14 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4, i32 0, i32 2 + %15 = load %Array*, %Array** %14, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %18 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, 
%Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 1 + %20 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 2 + %21 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 3 + %22 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, i32 0, i32 4 + store double %7, double* %18, align 8 + store i64 %9, i64* %19, align 4 + store double %11, double* %20, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %13, { { %Callable* }*, { i64, %Callable* }* }** %21, align 8 + store %Array* %15, %Array** %22, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* getelementptr ({ %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %25 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %24, i32 0, i32 1 + store %Array* %3, %Array** %25, align 8 + store { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %17, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %26, align 8 + %27 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 0 + %28 = load %Callable*, %Callable** %27, align 8 + %29 = call %Callable* @__quantum__rt__callable_copy(%Callable* %28, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %29) + call void @__quantum__rt__callable_make_controlled(%Callable* %29) + call void @__quantum__rt__callable_invoke(%Callable* %29, %Tuple* %23, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %29, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64, double, { { 
%Callable* }*, { i64, %Callable* }* }*, %Array* }* + %1 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load i64, i64* %2, align 4 + %8 = load double, double* %3, align 8 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %4, align 8 + %10 = load %Array*, %Array** %5, align 8 + call void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____body(double %6, i64 %7, double %8, { { %Callable* }*, { i64, %Callable* }* }* %9, %Array* %10) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %1 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load i64, i64* %2, align 4 + %8 = load double, double* %3, align 8 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %4, align 8 + %10 = load %Array*, %Array** %5, align 8 + call void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____adj(double %6, i64 %7, double %8, { { %Callable* }*, { i64, %Callable* }* }* %9, %Array* %10) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { 
%Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____ctl(%Array* %3, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }*, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef0__TrotterSimulationAlgorithmImpl____ctladj(%Array* %3, { double, i64, double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__13__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__13__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__25__body__wrapper(%Tuple* %capture-tuple, %Tuple* 
%arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %2 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { i64, double, %Array* }* + %4 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 0 + %5 = load i64, i64* %4, align 4 + %6 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* getelementptr ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %12 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 3 + store { { %Callable* }*, { i64, %Callable* }* }* %2, { { %Callable* }*, { i64, %Callable* }* }** %12, align 8 + store i64 %5, i64* %13, align 4 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__25__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %2 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { i64, double, %Array* }* + %4 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 0 + %5 = load i64, i64* %4, align 4 + %6 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 1 + %7 = load double, double* 
%6, align 8 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %3, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* getelementptr ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %12 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %11, i32 0, i32 3 + store { { %Callable* }*, { i64, %Callable* }* }* %2, { { %Callable* }*, { i64, %Callable* }* }** %12, align 8 + store i64 %5, i64* %13, align 4 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__25__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, double, %Array* }*, { i64, double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1 + %7 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %6, align 8 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 0 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, 
i32 1 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 2 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* getelementptr ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %16 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 3 + store { { %Callable* }*, { i64, %Callable* }* }* %7, { { %Callable* }*, { i64, %Callable* }* }** %16, align 8 + store i64 %9, i64* %17, align 4 + store double %11, double* %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* getelementptr ({ %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__25__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i64, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i64, double, %Array* }* }, { %Array*, { i64, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i64, double, %Array* }*, { i64, double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1 + %7 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %6, align 8 + %8 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 0 + %9 = load i64, i64* %8, align 4 + %10 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 1 + %11 = load double, double* %10, align 8 + %12 = getelementptr inbounds { i64, double, %Array* }, { i64, double, %Array* }* %4, i32 0, i32 2 + %13 = load %Array*, %Array** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* getelementptr ({ { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %16 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, i32 0, i32 3 + store { { %Callable* }*, { i64, %Callable* }* }* %7, { { %Callable* }*, { i64, %Callable* }* }** %16, align 8 + store i64 %9, i64* %17, align 4 + store double %11, double* %18, align 8 + store %Array* %13, %Array** %19, align 8 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* getelementptr ({ %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* + %22 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* 
%21, i32 0, i32 0 + %23 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %21, i32 0, i32 1 + store %Array* %3, %Array** %22, align 8 + store { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %15, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }** %23, align 8 + %24 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0 + %25 = load %Callable*, %Callable** %24, align 8 + %26 = call %Callable* @__quantum__rt__callable_copy(%Callable* %25, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %26) + call void @__quantum__rt__callable_make_controlled(%Callable* %26) + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %20, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %1 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 3 + %5 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %6 = load i64, i64* %2, align 4 + %7 = load double, double* %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____body({ { %Callable* }*, { i64, %Callable* }* }* %5, i64 %6, double %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* + %1 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* 
}*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %0, i32 0, i32 3 + %5 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %1, align 8 + %6 = load i64, i64* %2, align 4 + %7 = load double, double* %3, align 8 + %8 = load %Array*, %Array** %4, align 8 + call void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____adj({ { %Callable* }*, { i64, %Callable* }* }* %5, i64 %6, double %7, %Array* %8) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____ctl(%Array* %3, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }, { %Array*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }*, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Simulation____QsRef0__TrotterStepImpl____ctladj(%Array* %3, { { { %Callable* }*, { i64, %Callable* }* }*, i64, double, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__14__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = 
getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 %count-change) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 %count-change) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__14__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 %count-change) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { i64, 
%Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 %count-change) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__26__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array* }* + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__26__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array* }* + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__26__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Array* }* %9, { double, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__26__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 
ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Array* }* + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array* }* }* getelementptr ({ %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Array* }* %9, { double, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @MemoryManagement__15__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__15__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___86c36a8a845246bfb23f44646c7e9d24_Subarray__body(%Array* %indices, %Array* %array) { +entry: + %sliced = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %nSliced = call i64 @__quantum__rt__array_get_size_1d(%Array* 
%indices) + %0 = icmp eq i64 %nSliced, 0 + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %1 + +continue__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %3 = bitcast i8* %2 to i64* + %4 = load i64, i64* %3, align 4 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %4) + %6 = bitcast i8* %5 to %Qubit** + %7 = load %Qubit*, %Qubit** %6, align 8 + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nSliced) + %9 = sub i64 %nSliced, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %10 = phi i64 [ 0, %continue__1 ], [ %14, %exiting__1 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 %10) + %13 = bitcast i8* %12 to %Qubit** + store %Qubit* %7, %Qubit** %13, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %14 = add i64 %10, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %8, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %15 = sub i64 %nSliced, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idx = phi i64 [ 1, %exit__1 ], [ %27, %exiting__2 ] + %16 = icmp sle i64 %idx, %15 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = load %Array*, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %18 = call %Array* @__quantum__rt__array_copy(%Array* %17, i1 false) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 %idx) + %20 = bitcast i8* %19 to i64* + %21 = load i64, i64* %20, align 4 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %21) + %23 = bitcast i8* %22 to %Qubit** + %24 = load %Qubit*, %Qubit** %23, align 8 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %idx) + %26 = bitcast i8* %25 to %Qubit** + store %Qubit* %24, %Qubit** %26, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + store %Array* %18, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %idx, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %28 = load %Array*, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 -1) + ret %Array* %28 +} + +define internal %Array* @Microsoft__Quantum__Arrays___1d2b34a15cf5490eb8142fe0e14c514a_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = 
%header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %9 = icmp slt i64 %8, %0 + br i1 %9, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %8, %condTrue__1 ], [ %0, %condFalse__1 ] + %10 = icmp eq i64 %nElements, 0 + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + %12 = sub i64 %0, 1 + br label %header__2 + +continue__1: ; preds = %condContinue__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %14 = bitcast i8* %13 to double* + %15 = load double, double* %14, align 8 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %17 = bitcast i8* %16 to %Array** + %18 = load %Array*, %Array** %17, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 1) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { double, %Array* }* + %21 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %20, i32 0, i32 1 + store double %15, double* %21, align 8 + store %Array* %18, %Array** %22, align 8 + %23 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %24 = sub i64 %nElements, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %25 = phi i64 [ 0, %then0__1 ], [ %30, %exiting__2 ] + %26 = icmp sle i64 %25, %12 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %25) + %28 = bitcast i8* %27 to %Array** + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %11 + +header__3: ; preds = %exiting__3, %continue__1 + %31 = phi i64 [ 0, %continue__1 ], [ %36, %exiting__3 ] + %32 = icmp sle i64 %31, %24 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %31) + %34 = bitcast i8* %33 to { double, %Array* }** + store { double, %Array* }* %20, { double, %Array* }** %34, align 8 + %35 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %35, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %36 = 
add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %23, %Array** %output, align 8 + %37 = sub i64 %nElements, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %38 = phi i64 [ 0, %exit__3 ], [ %46, %exiting__4 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %38) + %41 = bitcast i8* %40 to { double, %Array* }** + %42 = load { double, %Array* }*, { double, %Array* }** %41, align 8 + %43 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %42, i32 0, i32 1 + %44 = load %Array*, %Array** %43, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 1) + %45 = bitcast { double, %Array* }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %46 = add i64 %38, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %47 = sub i64 %nElements, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idxElement = phi i64 [ 1, %exit__4 ], [ %67, %exiting__5 ] + %48 = icmp sle i64 %idxElement, %47 + br i1 %48, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + %50 = call %Array* @__quantum__rt__array_copy(%Array* %49, i1 false) + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %52 = bitcast i8* %51 to double* + %53 = load double, double* %52, align 8 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %55 = bitcast i8* %54 to %Array** + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %56, i32 1) + %57 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %58 = bitcast %Tuple* %57 to { double, %Array* }* + %59 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %58, i32 0, i32 0 + %60 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %58, i32 0, i32 1 + store double %53, double* %59, align 8 + store %Array* %56, %Array** %60, align 8 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 %idxElement) + %62 = bitcast i8* %61 to { double, %Array* }** + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 1) + %63 = load { double, %Array* }*, { double, %Array* }** %62, align 8 + %64 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %63, i32 0, i32 1 + %65 = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 -1) + %66 = bitcast { double, %Array* }* %63 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %66, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %66, i32 -1) + store { double, %Array* }* %58, { double, %Array* }** %62, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 1) + store %Array* %50, %Array** %output, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %67 = add i64 %idxElement, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %68 = load %Array*, %Array** %output, align 8 + %69 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + %70 = sub i64 %0, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %71 = phi i64 [ 0, %exit__5 ], [ %76, %exiting__6 ] + %72 = icmp sle i64 %71, %70 + br i1 %72, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %73 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %71) + %74 = bitcast i8* %73 to %Array** + %75 = load %Array*, %Array** %74, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %75, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %76 = add i64 %71, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %77 = call i64 @__quantum__rt__array_get_size_1d(%Array* %68) + %78 = sub i64 %77, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %79 = phi i64 [ 0, %exit__6 ], [ %87, %exiting__7 ] + %80 = icmp sle i64 %79, %78 + br i1 %80, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %68, i64 %79) + %82 = bitcast i8* %81 to { double, %Array* }** + %83 = load { double, %Array* }*, { double, %Array* }** %82, align 8 + %84 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %83, i32 0, i32 1 + %85 = load %Array*, %Array** %84, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %85, i32 -1) + %86 = bitcast { double, %Array* }* %83 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %87 = add i64 %79, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %68, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + ret %Array* %68 +} + +define internal %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %9 = icmp slt i64 %0, %8 + br i1 %9, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 
+ br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %8, %condFalse__1 ] + %10 = icmp eq i64 %nElements, 0 + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %12 = sub i64 %0, 1 + br label %header__2 + +continue__1: ; preds = %condContinue__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %14 = bitcast i8* %13 to %Array** + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %17 = bitcast i8* %16 to double* + %18 = load double, double* %17, align 8 + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double }* getelementptr ({ %Array*, double }, { %Array*, double }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { %Array*, double }* + %21 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %20, i32 0, i32 1 + store %Array* %15, %Array** %21, align 8 + store double %18, double* %22, align 8 + %23 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %24 = sub i64 %nElements, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %25 = phi i64 [ 0, %then0__1 ], [ %30, %exiting__2 ] + %26 = icmp sle i64 %25, %12 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %25) + %28 = bitcast i8* %27 to %Array** + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %11 + +header__3: ; preds = %exiting__3, %continue__1 + %31 = phi i64 [ 0, %continue__1 ], [ %36, %exiting__3 ] + %32 = icmp sle i64 %31, %24 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %31) + %34 = bitcast i8* %33 to { %Array*, double }** + store { %Array*, double }* %20, { %Array*, double }** %34, align 8 + %35 = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %35, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %36 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %23, %Array** %output, align 8 + %37 = sub i64 %nElements, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %38 = phi i64 [ 0, %exit__3 ], [ %46, %exiting__4 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %38) + %41 = bitcast i8* %40 to { %Array*, double }** + %42 = load { %Array*, double }*, { %Array*, double }** %41, align 8 + %43 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %42, i32 0, i32 0 + %44 = load %Array*, 
%Array** %43, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 1) + %45 = bitcast { %Array*, double }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %46 = add i64 %38, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %47 = sub i64 %nElements, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idxElement = phi i64 [ 1, %exit__4 ], [ %67, %exiting__5 ] + %48 = icmp sle i64 %idxElement, %47 + br i1 %48, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + %50 = call %Array* @__quantum__rt__array_copy(%Array* %49, i1 false) + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %52 = bitcast i8* %51 to %Array** + %53 = load %Array*, %Array** %52, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 1) + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %55 = bitcast i8* %54 to double* + %56 = load double, double* %55, align 8 + %57 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double }* getelementptr ({ %Array*, double }, { %Array*, double }* null, i32 1) to i64)) + %58 = bitcast %Tuple* %57 to { %Array*, double }* + %59 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %58, i32 0, i32 0 + %60 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %58, i32 0, i32 1 + store %Array* %53, %Array** %59, align 8 + store double %56, double* %60, align 8 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 %idxElement) + %62 = bitcast i8* %61 to { %Array*, double }** + call void @__quantum__rt__array_update_alias_count(%Array* %53, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 1) + %63 = load { %Array*, double }*, { %Array*, double }** %62, align 8 + %64 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %63, i32 0, i32 0 + %65 = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 -1) + %66 = bitcast { %Array*, double }* %63 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %66, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %66, i32 -1) + store { %Array*, double }* %58, { %Array*, double }** %62, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 1) + store %Array* %50, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %67 = add i64 %idxElement, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %68 = load %Array*, %Array** %output, align 8 + %69 = load %Array*, %Array** %21, align 8 + %70 = sub i64 %0, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %71 = phi i64 [ 0, %exit__5 ], [ %76, %exiting__6 ] + %72 = icmp sle i64 %71, %70 + br i1 %72, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %73 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %71) + %74 = bitcast i8* %73 to %Array** + %75 = load %Array*, %Array** %74, align 8 + 
call void @__quantum__rt__array_update_alias_count(%Array* %75, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %76 = add i64 %71, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %77 = call i64 @__quantum__rt__array_get_size_1d(%Array* %68) + %78 = sub i64 %77, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %79 = phi i64 [ 0, %exit__6 ], [ %87, %exiting__7 ] + %80 = icmp sle i64 %79, %78 + br i1 %80, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %68, i64 %79) + %82 = bitcast i8* %81 to { %Array*, double }** + %83 = load { %Array*, double }*, { %Array*, double }** %82, align 8 + %84 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %83, i32 0, i32 0 + %85 = load %Array*, %Array** %84, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %85, i32 -1) + %86 = bitcast { %Array*, double }* %83 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %87 = add i64 %79, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %68, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + ret %Array* %68 +} + +define internal %Array* @Microsoft__Quantum__Arrays___00d59157a6454ecdaf64b45c69ab4afd_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %2 = icmp slt i64 %0, %1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = icmp eq i64 %nElements, 0 + br i1 %3, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %4 + +continue__1: ; preds = %condContinue__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %6 = bitcast i8* %5 to i64* + %7 = load i64, i64* %6, align 4 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %9 = bitcast i8* %8 to i2* + %10 = load i2, i2* %9, align 1 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i2 }* getelementptr ({ i64, i2 }, { i64, i2 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i64, i2 }* + %13 = getelementptr inbounds { i64, i2 }, { i64, i2 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i64, i2 }, { i64, i2 }* %12, i32 0, i32 1 + store i64 %7, i64* %13, align 4 + store i2 %10, i2* %14, align 1 + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %16 = 
sub i64 %nElements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %17 = phi i64 [ 0, %continue__1 ], [ %21, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %17) + %20 = bitcast i8* %19 to { i64, i2 }** + store { i64, i2 }* %12, { i64, i2 }** %20, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %15, %Array** %output, align 8 + %22 = sub i64 %nElements, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %23) + %26 = bitcast i8* %25 to { i64, i2 }** + %27 = load { i64, i2 }*, { i64, i2 }** %26, align 8 + %28 = bitcast { i64, i2 }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %30 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %48, %exiting__3 ] + %31 = icmp sle i64 %idxElement, %30 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %32 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %35 = bitcast i8* %34 to i64* + %36 = load i64, i64* %35, align 4 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %38 = bitcast i8* %37 to i2* + %39 = load i2, i2* %38, align 1 + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i2 }* getelementptr ({ i64, i2 }, { i64, i2 }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i64, i2 }* + %42 = getelementptr inbounds { i64, i2 }, { i64, i2 }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i64, i2 }, { i64, i2 }* %41, i32 0, i32 1 + store i64 %36, i64* %42, align 4 + store i2 %39, i2* %43, align 1 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxElement) + %45 = bitcast i8* %44 to { i64, i2 }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %46 = load { i64, i2 }*, { i64, i2 }** %45, align 8 + %47 = bitcast { i64, i2 }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { i64, i2 }* %41, { i64, i2 }** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %49 = load %Array*, %Array** %output, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { i64, i2 }** + %56 = load { i64, i2 }*, { i64, i2 }** %55, align 8 + %57 = bitcast { i64, i2 }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret %Array* %49 +} + +define internal %Array* @Microsoft__Quantum__Arrays___5ac6d1808c4040b9aa3fa0e6ce75855c_Padded__body(i64 %nElementsTotal, { double, double }* %defaultElement, %Array* %inputArray) { +entry: + %0 = bitcast { double, double }* %defaultElement to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %nElementsInitial = call i64 @__quantum__rt__array_get_size_1d(%Array* %inputArray) + %1 = sub i64 %nElementsInitial, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %inputArray, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 1) + %nAbsElementsTotal = call i64 @Microsoft__Quantum__Math__AbsI__body(i64 %nElementsTotal) + %9 = icmp sge i64 %nAbsElementsTotal, %nElementsInitial + %10 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([71 x i8], [71 x i8]* @10, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %9, i1 true, %String* %10) + %nElementsPad = sub i64 %nAbsElementsTotal, %nElementsInitial + %padArray = call %Array* @Microsoft__Quantum__Arrays___4ca44cbcd7d8480ab6bb0acabd529c9a_ConstantArray__body(i64 %nElementsPad, { double, double }* %defaultElement) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %padArray) + %12 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %padArray, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + br label %exiting__2 + +exiting__2: ; preds = 
%body__2 + %19 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 1) + %20 = icmp sge i64 %nElementsTotal, 0 + br i1 %20, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__2 + %21 = call %Array* @__quantum__rt__array_concatenate(%Array* %padArray, %Array* %inputArray) + %22 = call i64 @__quantum__rt__array_get_size_1d(%Array* %21) + %23 = sub i64 %22, 1 + br label %header__3 + +condFalse__1: ; preds = %exit__2 + %24 = call %Array* @__quantum__rt__array_concatenate(%Array* %inputArray, %Array* %padArray) + %25 = call i64 @__quantum__rt__array_get_size_1d(%Array* %24) + %26 = sub i64 %25, 1 + br label %header__4 + +condContinue__1: ; preds = %exit__4, %exit__3 + %27 = phi %Array* [ %21, %exit__3 ], [ %24, %exit__4 ] + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + %28 = sub i64 %nElementsInitial, 1 + br label %header__5 + +header__3: ; preds = %exiting__3, %condTrue__1 + %29 = phi i64 [ 0, %condTrue__1 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %23 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) + br label %condContinue__1 + +header__4: ; preds = %exiting__4, %condFalse__1 + %36 = phi i64 [ 0, %condFalse__1 ], [ %42, %exiting__4 ] + %37 = icmp sle i64 %36, %26 + br i1 %37, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %24, i64 %36) + %39 = bitcast i8* %38 to { double, double }** + %40 = load { double, double }*, { double, double }** %39, align 8 + %41 = bitcast { double, double }* %40 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %42 = add i64 %36, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 -1) + br label %condContinue__1 + +header__5: ; preds = %exiting__5, %condContinue__1 + %43 = phi i64 [ 0, %condContinue__1 ], [ %49, %exiting__5 ] + %44 = icmp sle i64 %43, %28 + br i1 %44, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %inputArray, i64 %43) + %46 = bitcast i8* %45 to { double, double }** + %47 = load { double, double }*, { double, double }** %46, align 8 + %48 = bitcast { double, double }* %47 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %48, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %49 = add i64 %43, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 -1) + %50 = sub i64 %11, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + 
%51 = phi i64 [ 0, %exit__5 ], [ %57, %exiting__6 ] + %52 = icmp sle i64 %51, %50 + br i1 %52, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %padArray, i64 %51) + %54 = bitcast i8* %53 to { double, double }** + %55 = load { double, double }*, { double, double }** %54, align 8 + %56 = bitcast { double, double }* %55 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %57 = add i64 %51, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + %58 = sub i64 %11, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %59 = phi i64 [ 0, %exit__6 ], [ %65, %exiting__7 ] + %60 = icmp sle i64 %59, %58 + br i1 %60, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %padArray, i64 %59) + %62 = bitcast i8* %61 to { double, double }** + %63 = load { double, double }*, { double, double }** %62, align 8 + %64 = bitcast { double, double }* %63 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %64, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %65 = add i64 %59, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %padArray, i32 -1) + ret %Array* %27 +} + +define internal i64 @Microsoft__Quantum__Math__AbsI__body(i64 %a) { +entry: + %0 = icmp slt i64 %a, 0 + br i1 %0, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %1 = sub i64 0, %a + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %2 = phi i64 [ %1, %condTrue__1 ], [ %a, %condFalse__1 ] + ret i64 %2 +} + +define internal void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %actual, i1 %expected, %String* %message) { +entry: + %0 = icmp ne i1 %actual, %expected + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Diagnostics___51fffc26616d4cc5a746323fd8bcea36___QsRef0__FormattedFailure____body(i1 %actual, i1 %expected, %String* %message) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___4ca44cbcd7d8480ab6bb0acabd529c9a_ConstantArray__body(i64 %length, { double, double }* %value) { +entry: + %0 = bitcast { double, double }* %value to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %2 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %3) + %6 = bitcast i8* %5 to { double, double }** + store { double, double }* %value, { double, double }** %6, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret %Array* %1 +} + +declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) + +define internal %Array* @Microsoft__Quantum__Arrays___8db1b1d8b63441b583b7338681e3b5b2_ConstantArray__body(i64 %length, double %value) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %1 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2) + %5 = bitcast i8* %4 to double* + store double %value, double* %5, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + ret %Array* %0 +} + +define internal %Array* @Microsoft__Quantum__Arrays___ac214dcd588b470fb29f1cc67e145065_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to %Array** + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %7 = icmp eq i64 %length, 0 + br i1 %7, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %9 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %11 = bitcast i8* %10 to %Array** + %12 = load %Array*, %Array** %11, align 8 + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Array* }* + %15 = getelementptr inbounds { %Array* }, { %Array* }* %14, i32 0, i32 0 + store %Array* %12, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %13, %Tuple* %16) + %17 = bitcast %Tuple* %16 to { %Callable* }* + %18 = getelementptr inbounds { %Callable* }, { %Callable* }* %17, i32 0, i32 0 + %first = load %Callable*, %Callable** %18, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %first, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %first, i32 1) + %19 = call %Array* 
@__quantum__rt__array_create_1d(i32 8, i64 %length) + %20 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %21 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__2 ] + %22 = icmp sle i64 %21, %9 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %21) + %24 = bitcast i8* %23 to %Array** + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %8 + +header__3: ; preds = %exiting__3, %continue__1 + %27 = phi i64 [ 0, %continue__1 ], [ %31, %exiting__3 ] + %28 = icmp sle i64 %27, %20 + br i1 %28, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 %27) + %30 = bitcast i8* %29 to %Callable** + store %Callable* %first, %Callable** %30, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %first, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %first, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %31 = add i64 %27, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %19, %Array** %retval, align 8 + %32 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %33 = phi i64 [ 0, %exit__3 ], [ %38, %exiting__4 ] + %34 = icmp sle i64 %33, %32 + br i1 %34, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 %33) + %36 = bitcast i8* %35 to %Callable** + %37 = load %Callable*, %Callable** %36, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %37, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %37, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %38 = add i64 %33, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 1) + %39 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idx = phi i64 [ 1, %exit__4 ], [ %56, %exiting__5 ] + %40 = icmp sle i64 %idx, %39 + br i1 %40, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %41 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 -1) + %42 = call %Array* @__quantum__rt__array_copy(%Array* %41, i1 false) + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %44 = bitcast i8* %43 to %Array** + %45 = load %Array*, %Array** %44, align 8 + %46 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %47 = bitcast %Tuple* %46 to { %Array* }* + %48 = getelementptr inbounds { %Array* }, { %Array* }* %47, i32 0, i32 0 + store %Array* %45, %Array** %48, align 8 + %49 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable* }* getelementptr ({ %Callable* }, { %Callable* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %46, %Tuple* %49) + %50 = bitcast %Tuple* %49 to { %Callable* }* + %51 = getelementptr inbounds { %Callable* }, 
{ %Callable* }* %50, i32 0, i32 0 + %52 = load %Callable*, %Callable** %51, align 8 + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %42, i64 %idx) + %54 = bitcast i8* %53 to %Callable** + call void @__quantum__rt__capture_update_alias_count(%Callable* %52, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %52, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %52, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %52, i32 1) + %55 = load %Callable*, %Callable** %54, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %55, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %55, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %55, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %55, i32 -1) + store %Callable* %52, %Callable** %54, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 1) + store %Array* %42, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %46, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %52, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %52, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %49, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %56 = add i64 %idx, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %57 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %58 = sub i64 %length, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %59 = phi i64 [ 0, %exit__5 ], [ %64, %exiting__6 ] + %60 = icmp sle i64 %59, %58 + br i1 %60, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %59) + %62 = bitcast i8* %61 to %Array** + %63 = load %Array*, %Array** %62, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %63, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %64 = add i64 %59, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %first, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %first, i32 -1) + %65 = call i64 @__quantum__rt__array_get_size_1d(%Array* %57) + %66 = sub i64 %65, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %67 = phi i64 [ 0, %exit__6 ], [ %72, %exiting__7 ] + %68 = icmp sle i64 %67, %66 + br i1 %68, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %57, i64 %67) + %70 = bitcast i8* %69 to %Callable** + %71 = load %Callable*, %Callable** %70, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %71, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %71, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %72 = add i64 %67, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %57, i32 -1) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %first, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %first, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret %Array* %57 +} + +define internal %Array* @Microsoft__Quantum__Arrays___bce10a946d1b466781aeb2785d88e6e2_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = icmp eq i64 %length, 0 + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %1 + +continue__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %3 = bitcast i8* %2 to double* + %4 = load double, double* %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { double }* + %7 = getelementptr inbounds { double }, { double }* %6, i32 0, i32 0 + store double %4, double* %7, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }* }* getelementptr ({ { double, double }* }, { { double, double }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %5, %Tuple* %8) + %9 = bitcast %Tuple* %8 to { { double, double }* }* + %10 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %9, i32 0, i32 0 + %first = load { double, double }*, { double, double }** %10, align 8 + %11 = bitcast { double, double }* %first to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %13 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %14 = phi i64 [ 0, %continue__1 ], [ %18, %exiting__1 ] + %15 = icmp sle i64 %14, %13 + br i1 %15, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %14) + %17 = bitcast i8* %16 to { double, double }** + store { double, double }* %first, { double, double }** %17, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %14, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %12, %Array** %retval, align 8 + %19 = sub i64 %length, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %26, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %20) + %23 = bitcast i8* %22 to { double, double }** + %24 = load { 
double, double }*, { double, double }** %23, align 8 + %25 = bitcast { double, double }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %27 = sub i64 %length, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idx = phi i64 [ 1, %exit__2 ], [ %46, %exiting__3 ] + %28 = icmp sle i64 %idx, %27 + br i1 %28, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %29 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 -1) + %30 = call %Array* @__quantum__rt__array_copy(%Array* %29, i1 false) + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %32 = bitcast i8* %31 to double* + %33 = load double, double* %32, align 8 + %34 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %35 = bitcast %Tuple* %34 to { double }* + %36 = getelementptr inbounds { double }, { double }* %35, i32 0, i32 0 + store double %33, double* %36, align 8 + %37 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }* }* getelementptr ({ { double, double }* }, { { double, double }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %34, %Tuple* %37) + %38 = bitcast %Tuple* %37 to { { double, double }* }* + %39 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %38, i32 0, i32 0 + %40 = load { double, double }*, { double, double }** %39, align 8 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 %idx) + %42 = bitcast i8* %41 to { double, double }** + %43 = bitcast { double, double }* %40 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 1) + %44 = load { double, double }*, { double, double }** %42, align 8 + %45 = bitcast { double, double }* %44 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + store { double, double }* %40, { double, double }** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %30, i32 1) + store %Array* %30, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %46 = add i64 %idx, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %47 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + %48 = call i64 @__quantum__rt__array_get_size_1d(%Array* %47) + %49 = sub i64 %48, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %50 = phi i64 [ 
0, %exit__3 ], [ %56, %exiting__4 ] + %51 = icmp sle i64 %50, %49 + br i1 %51, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %50) + %53 = bitcast i8* %52 to { double, double }** + %54 = load { double, double }*, { double, double }** %53, align 8 + %55 = bitcast { double, double }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %55, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %56 = add i64 %50, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret %Array* %47 +} + +define internal %Range @Microsoft__Quantum__Arrays___d58849b717694e4ca69317572366b289_IndexRange__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %2 +} + +define internal %Range @Microsoft__Quantum__Arrays___ab9454a18cf34e7dab26076c15ee491d_IndexRange__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { i64, %Callable* }** + %6 = load { i64, %Callable* }*, { i64, %Callable* }** %5, align 8 + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 1 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + %9 = bitcast { i64, %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %11 = sub i64 %0, 1 + %12 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %11, 2 + %13 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %14 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %15 = icmp sle i64 %14, %13 + br i1 %15, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %14) + %17 = bitcast i8* %16 to { i64, %Callable* }** + %18 = load { i64, %Callable* }*, { i64, %Callable* }** %17, align 8 + %19 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %18, i32 0, i32 1 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %20, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %20, i32 -1) + %21 = bitcast { i64, %Callable* }* %18 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %14, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %12 +} + +define internal i1 @Microsoft__Quantum__Arrays___d03f28613a2a406a92da3539b001d776_IsEmpty__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { { double, double }*, %Array* }** + %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0 + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %14 = icmp eq i64 %0, 0 + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %16) + %19 = bitcast i8* %18 to { { double, double }*, %Array* }** + %20 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %19, align 8 + %21 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %20, i32 0, i32 0 + %22 = load { double, double }*, { double, double }** %21, align 8 + %23 = bitcast { double, double }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %23, i32 -1) + %24 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %20, i32 0, i32 1 + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + %26 = bitcast { { double, double }*, %Array* }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret i1 %14 +} + +define internal %Callable* @Microsoft__Quantum__Arrays___4cd8b89fe06d48f482ef5ccfcb618894_ElementAt__body(i64 %index, %Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = 
%exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %8 = icmp sge i64 %index, 0 + br i1 %8, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %exit__1 + %9 = icmp slt i64 %index, %0 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %exit__1 + %10 = phi i1 [ %9, %condTrue__1 ], [ %8, %exit__1 ] + %11 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([22 x i8], [22 x i8]* @12, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %10, %String* %11) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %index) + %13 = bitcast i8* %12 to %Callable** + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 1) + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %condContinue__1 + %16 = phi i64 [ 0, %condContinue__1 ], [ %21, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %16) + %19 = bitcast i8* %18 to %Callable** + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %20, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %20, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) + ret %Callable* %14 +} + +define internal %Callable* @Microsoft__Quantum__Arrays___fc3dc354bc024fd5b7f38df86565fb27_LookupFunction__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* 
@Microsoft__Quantum__Arrays___4cd8b89fe06d48f482ef5ccfcb618894_ElementAt__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %9 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %15, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %10) + %13 = bitcast i8* %12 to %Callable** + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %15 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %array, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Callable*, %Array* }* + %18 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 1 + store %Callable* %8, %Callable** %18, align 8 + store %Array* %array, %Array** %19, align 8 + %20 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__27__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__16__FunctionTable, %Tuple* %16) + %21 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %22 = phi i64 [ 0, %exit__2 ], [ %27, %exiting__3 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %22) + %25 = bitcast i8* %24 to %Callable** + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %27 = add i64 %22, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Callable* %20 +} + +define internal void @Lifted__PartialApplication__27__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64 }* + %1 = getelementptr inbounds { i64 }, { i64 }* %0, i32 0, i32 0 + %2 = load i64, i64* %1, align 4 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %4 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %3, i32 0, i32 1 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { i64, %Array* }* + %8 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %7, i32 0, i32 1 + store i64 %2, i64* %8, align 4 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %3, i32 0, i32 0 + %11 = load 
%Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Arrays___4cd8b89fe06d48f482ef5ccfcb618894_ElementAt__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, %Array* }* + %1 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %0, i32 0, i32 1 + %3 = load i64, i64* %1, align 4 + %4 = load %Array*, %Array** %2, align 8 + %5 = call %Callable* @Microsoft__Quantum__Arrays___4cd8b89fe06d48f482ef5ccfcb618894_ElementAt__body(i64 %3, %Array* %4) + %6 = bitcast %Tuple* %result-tuple to { %Callable* }* + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + store %Callable* %5, %Callable** %7, align 8 + ret void +} + +define internal void @MemoryManagement__16__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Callable** + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__16__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label 
%body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Callable** + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %11, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___8023f18e08eb4c09a8a8acf673dba09b_ConstantArray__body(i64 %length, i2 %value) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %length) + %1 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2) + %5 = bitcast i8* %4 to i2* + store i2 %value, i2* %5, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + ret %Array* %0 +} + +define internal { i64, double, i1 }* @Microsoft__Quantum__Math____QsRef1__ExtendedTruncation____body(double %value) { +entry: + %truncated = fptosi double %value to i64 + %0 = sitofp i64 %truncated to double + %1 = fsub double %0, %value + %2 = fcmp oge double %value, 0.000000e+00 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, i1 }* getelementptr ({ i64, double, i1 }, { i64, double, i1 }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i64, double, i1 }* + %5 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %4, i32 0, i32 2 + store i64 %truncated, i64* %5, align 4 + store double %1, double* %6, align 8 + store i1 %2, i1* %7, align 1 + ret { i64, double, i1 }* %4 +} + +define internal double @Microsoft__Quantum__Math__AbsComplex__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %d = call double @Microsoft__Quantum__Math__AbsSquaredComplex__body({ double, double }* %input) + %1 = call double @__quantum__qis__sqrt__body(double %d) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %1 +} + +define internal double @Microsoft__Quantum__Math__AbsSquaredComplex__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 0 + %real = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 1 + %imaginary = load double, double* %2, align 8 + %3 = fmul double %real, %real + %4 = fmul double %imaginary, %imaginary + %5 = fadd double %3, %4 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, 
i32 -1) + ret double %5 +} + +declare double @__quantum__qis__sqrt__body(double) + +define internal double @Microsoft__Quantum__Math__ArcTan2__body(double %y, double %x) { +entry: + %0 = call double @__quantum__qis__arctan2__body(double %y, double %x) + ret double %0 +} + +declare double @__quantum__qis__arctan2__body(double, double) + +define internal double @Microsoft__Quantum__Math__ArgComplex__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 0 + %real = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 1 + %imaginary = load double, double* %2, align 8 + %3 = call double @__quantum__qis__arctan2__body(double %imaginary, double %real) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %3 +} + +define internal double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 1 + %2 = load double, double* %1, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %2 +} + +define internal { double, double }* @Microsoft__Quantum__Math__Complex__body(double %Real, double %Imag) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { double, double }* + %2 = getelementptr inbounds { double, double }, { double, double }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { double, double }, { double, double }* %1, i32 0, i32 1 + store double %Real, double* %2, align 8 + store double %Imag, double* %3, align 8 + ret { double, double }* %1 +} + +define internal { double, double }* @Microsoft__Quantum__Math__ComplexAsComplexPolar__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = call double @Microsoft__Quantum__Math__AbsComplex__body({ double, double }* %input) + %2 = call double @Microsoft__Quantum__Math__ArgComplex__body({ double, double }* %input) + %3 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %1, double %2) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret { double, double }* %3 +} + +define internal { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %Magnitude, double %Argument) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { double, double }* + %2 = getelementptr inbounds { double, double }, { double, double }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { double, double }, { double, double }* %1, i32 0, i32 1 + store double %Magnitude, double* %2, align 8 + store double %Argument, double* %3, align 8 + ret { double, double }* %1 +} + +define internal double @Microsoft__Quantum__Math__Lg__body(double %input) { +entry: + %0 = call double @__quantum__qis__log__body(double %input) + %1 = call double 
@Microsoft__Quantum__Math__LogOf2__body() + %2 = fdiv double %0, %1 + ret double %2 +} + +declare double @__quantum__qis__log__body(double) + +define internal double @Microsoft__Quantum__Math__LogOf2__body() { +entry: + ret double 0x3FE62E42FEFA39EF +} + +define internal double @Microsoft__Quantum__Math__Log__body(double %input) { +entry: + %0 = call double @__quantum__qis__log__body(double %input) + ret double %0 +} + +define internal i64 @Microsoft__Quantum__Math__Max__body(%Array* %values) { +entry: + %max = alloca i64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 0) + %1 = bitcast i8* %0 to i64* + %2 = load i64, i64* %1, align 4 + store i64 %2, i64* %max, align 4 + %nTerms = call i64 @__quantum__rt__array_get_size_1d(%Array* %values) + %3 = sub i64 %nTerms, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %4 = icmp sle i64 %idx, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %idx) + %6 = bitcast i8* %5 to i64* + %7 = load i64, i64* %6, align 4 + %8 = load i64, i64* %max, align 4 + %9 = icmp sgt i64 %7, %8 + br i1 %9, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %idx) + %11 = bitcast i8* %10 to i64* + %12 = load i64, i64* %11, align 4 + store i64 %12, i64* %max, align 4 + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %13 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %14 = load i64, i64* %max, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 -1) + ret i64 %14 +} + +define internal i64 @Microsoft__Quantum__Math__Min__body(%Array* %values) { +entry: + %min = alloca i64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 0) + %1 = bitcast i8* %0 to i64* + %2 = load i64, i64* %1, align 4 + store i64 %2, i64* %min, align 4 + %nTerms = call i64 @__quantum__rt__array_get_size_1d(%Array* %values) + %3 = sub i64 %nTerms, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idx = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %4 = icmp sle i64 %idx, %3 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %idx) + %6 = bitcast i8* %5 to i64* + %7 = load i64, i64* %6, align 4 + %8 = load i64, i64* %min, align 4 + %9 = icmp slt i64 %7, %8 + br i1 %9, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %idx) + %11 = bitcast i8* %10 to i64* + %12 = load i64, i64* %11, align 4 + store i64 %12, i64* %min, align 4 + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %13 = add i64 %idx, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %14 = load i64, i64* %min, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 -1) + ret i64 %14 +} + +; Function Attrs: nofree nosync nounwind readnone speculatable willreturn +declare double 
@llvm.pow.f64(double, double) #0 + +define internal double @Microsoft__Quantum__Math__Sqrt__body(double %d) { +entry: + %0 = call double @__quantum__qis__sqrt__body(double %d) + ret double %0 +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertAllZero__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %2) + %5 = bitcast i8* %4 to %Qubit** + %qubit = load %Qubit*, %Qubit** %5, align 8 + %6 = call %Result* @__quantum__rt__result_get_zero() + call void @Microsoft__Quantum__Diagnostics__AssertQubit__body(%Result* %6, %Qubit* %qubit) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertQubit__body(%Result* %expected, %Qubit* %q) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to i2* + store i2 -2, i2* %2, align 1 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %q, %Qubit** %5, align 8 + %6 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @13, i32 0, i32 0)) + %7 = call %String* @__quantum__rt__result_to_string(%Result* %expected) + %8 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %7) + call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %0, %Array* %3, %Result* %expected, %String* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertAllZero__adj(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertAllZero__body(%Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertAllZero__ctl(%Array* %ctrls, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertAllZero__body(%Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %ctrls, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertAllZero__ctladj(%Array* %__controlQubits__, 
%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertAllZero__ctl(%Array* %__controlQubits__, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %bases, %Array* %qubits, %Result* %result, %String* %msg) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double 1.000000e+00, %String* %msg, double 1.000000e-10) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +declare void @__quantum__qis__assertmeasurementprobability__body(%Array*, %Array*, %Result*, double, %String*, double) + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__adj(%Array* %bases, %Array* %qubits, %Result* %result, %String* %msg) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %bases, %Array* %qubits, %Result* %result, %String* %msg) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctl(%Array* %controllingQubits, { %Array*, %Array*, %Result*, %String* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controllingQubits, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 3 + %msg = load %String*, %String** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controllingQubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void 
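+; Note (descriptive comment, not part of the compiler output): this controlled specialization of AssertMeasurement only performs alias-count bookkeeping on the control array and the (bases, qubits, result, msg) tuple fields, then returns without invoking any runtime assertion.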
+} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctladj(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 3 + %msg = load %String*, %String** %4, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, %String* }* getelementptr ({ %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Array*, %Array*, %Result*, %String* }* + %7 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 2 + %10 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 3 + store %Array* %bases, %Array** %7, align 8 + store %Array* %qubits, %Array** %8, align 8 + store %Result* %result, %Result** %9, align 8 + store %String* %msg, %String** %10, align 8 + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %6) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__body(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__adj(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 3 + %prob = load double, double* %4, align 8 + %5 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 4 + %msg = load %String*, %String** %5, align 8 + %6 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 5 + %tolerance = load double, double* %6, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, double, %String*, double }* getelementptr ({ %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array*, %Result*, double, %String*, double }* + %9 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { 
%Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 2 + %12 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 3 + %13 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 4 + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 5 + store %Array* %bases, %Array** %9, align 8 + store %Array* %qubits, %Array** %10, align 8 + store %Result* %result, %Result** %11, align 8 + store double %prob, double* %12, align 8 + store %String* %msg, %String** %13, align 8 + store double %tolerance, double* %14, align 8 + call void @__quantum__qis__assertmeasurementprobability__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %8) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +declare void @__quantum__qis__assertmeasurementprobability__ctl(%Array*, { %Array*, %Array*, %Result*, double, %String*, double }*) + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__ctladj(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 3 + %prob = load double, double* %4, align 8 + %5 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, 
i32 4 + %msg = load %String*, %String** %5, align 8 + %6 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 5 + %tolerance = load double, double* %6, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, double, %String*, double }* getelementptr ({ %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array*, %Result*, double, %String*, double }* + %9 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 2 + %12 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 3 + %13 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 4 + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 5 + store %Array* %bases, %Array** %9, align 8 + store %Array* %qubits, %Array** %10, align 8 + store %Result* %result, %Result** %11, align 8 + store double %prob, double* %12, align 8 + store %String* %msg, %String** %13, align 8 + store double %tolerance, double* %14, align 8 + call void @__quantum__qis__assertmeasurementprobability__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %8) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +declare %String* @__quantum__rt__result_to_string(%Result*) + +define internal void @Microsoft__Quantum__Diagnostics__AssertQubit__adj(%Result* %expected, %Qubit* %q) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to i2* + store i2 -2, i2* %2, align 1 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 
= bitcast i8* %4 to %Qubit** + store %Qubit* %q, %Qubit** %5, align 8 + %6 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @13, i32 0, i32 0)) + %7 = call %String* @__quantum__rt__result_to_string(%Result* %expected) + %8 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %7) + call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__adj(%Array* %0, %Array* %3, %Result* %expected, %String* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertQubit__ctl(%Array* %__controlQubits__, { %Result*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Result*, %Qubit* }, { %Result*, %Qubit* }* %0, i32 0, i32 0 + %expected = load %Result*, %Result** %1, align 8 + %2 = getelementptr inbounds { %Result*, %Qubit* }, { %Result*, %Qubit* }* %0, i32 0, i32 1 + %q = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to i2* + store i2 -2, i2* %5, align 1 + %6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 0) + %8 = bitcast i8* %7 to %Qubit** + store %Qubit* %q, %Qubit** %8, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %expected, i32 1) + %9 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @13, i32 0, i32 0)) + %10 = call %String* @__quantum__rt__result_to_string(%Result* %expected) + %11 = call %String* @__quantum__rt__string_concatenate(%String* %9, %String* %10) + call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, %String* }* getelementptr ({ %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Array*, %Result*, %String* }* + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 3 + store %Array* %3, %Array** %14, align 8 + store %Array* %6, %Array** %15, align 8 + store %Result* %expected, %Result** %16, align 8 + store %String* %11, %String** %17, align 8 + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %13) + call void @__quantum__rt__array_update_alias_count(%Array* 
%__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %expected, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertQubit__ctladj(%Array* %__controlQubits__, { %Result*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Result*, %Qubit* }, { %Result*, %Qubit* }* %0, i32 0, i32 0 + %expected = load %Result*, %Result** %1, align 8 + %2 = getelementptr inbounds { %Result*, %Qubit* }, { %Result*, %Qubit* }* %0, i32 0, i32 1 + %q = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to i2* + store i2 -2, i2* %5, align 1 + %6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 0) + %8 = bitcast i8* %7 to %Qubit** + store %Qubit* %q, %Qubit** %8, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %expected, i32 1) + %9 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @13, i32 0, i32 0)) + %10 = call %String* @__quantum__rt__result_to_string(%Result* %expected) + %11 = call %String* @__quantum__rt__string_concatenate(%String* %9, %String* %10) + call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, %String* }* getelementptr ({ %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, %Array*, %Result*, %String* }* + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %13, i32 0, i32 3 + store %Array* %3, %Array** %14, align 8 + store %Array* %6, %Array** %15, align 8 + store %Result* %expected, %Result** %16, align 8 + store %String* %11, %String** %17, align 8 + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctladj(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %13) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %expected, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + 
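+; FormattedFailure: concatenates the incoming %message with the string constants @14-@18, selecting one of two constants for each of the %expected and %actual booleans, then raises the combined string via __quantum__rt__fail; the block ends in unreachable, so the call never returns.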
+define internal void @Microsoft__Quantum__Diagnostics___51fffc26616d4cc5a746323fd8bcea36___QsRef0__FormattedFailure____body(i1 %actual, i1 %expected, %String* %message) { +entry: + %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @14, i32 0, i32 0)) + %1 = call %String* @__quantum__rt__string_concatenate(%String* %0, %String* %message) + %2 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @15, i32 0, i32 0)) + %4 = call %String* @__quantum__rt__string_concatenate(%String* %2, %String* %3) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + br i1 %expected, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %5 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @16, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %entry + %6 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @17, i32 0, i32 0)) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %7 = phi %String* [ %5, %condTrue__1 ], [ %6, %condFalse__1 ] + %8 = call %String* @__quantum__rt__string_concatenate(%String* %4, %String* %7) + call void @__quantum__rt__string_update_reference_count(%String* %4, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + %9 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @18, i32 0, i32 0)) + %10 = call %String* @__quantum__rt__string_concatenate(%String* %8, %String* %9) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1) + br i1 %actual, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condContinue__1 + %11 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @16, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condContinue__1 + %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @17, i32 0, i32 0)) + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condTrue__2 + %13 = phi %String* [ %11, %condTrue__2 ], [ %12, %condFalse__2 ] + %14 = call %String* @__quantum__rt__string_concatenate(%String* %10, %String* %13) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + call void @__quantum__rt__fail(%String* %14) + unreachable +} + +define internal %Array* @Microsoft__Quantum__Convert__BoolArrayAsPauli__body(i2 %pauli, i1 %bitApply, %Array* %bits) { +entry: + %paulis = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 1) + %nBits = call i64 @__quantum__rt__array_get_size_1d(%Array* %bits) + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %nBits) + %1 = sub i64 %nBits, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 
+ br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2) + %5 = bitcast i8* %4 to i2* + store i2 0, i2* %5, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %0, %Array** %paulis, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 1) + %7 = sub i64 %nBits, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idxBit = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %8 = icmp sle i64 %idxBit, %7 + br i1 %8, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %9 = load %Array*, %Array** %paulis, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 -1) + %10 = call %Array* @__quantum__rt__array_copy(%Array* %9, i1 false) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bits, i64 %idxBit) + %12 = bitcast i8* %11 to i1* + %13 = load i1, i1* %12, align 1 + %14 = icmp eq i1 %13, %bitApply + %15 = select i1 %14, i2 %pauli, i2 0 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %idxBit) + %17 = bitcast i8* %16 to i2* + store i2 %15, i2* %17, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + store %Array* %10, %Array** %paulis, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %18 = add i64 %idxBit, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %19 = load %Array*, %Array** %paulis, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 -1) + ret %Array* %19 +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____body(double %angle, i64 %idxTarget, %Array* %register) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 0, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %3 = bitcast i8* %2 to %Qubit** + %4 = load %Qubit*, %Qubit** %3, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %6 = bitcast i8* %5 to %Qubit** + store %Qubit* %4, %Qubit** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %angle, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____adj(double %angle, i64 %idxTarget, %Array* %register) { +entry: + call void 
@__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 0, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %3 = bitcast i8* %2 to %Qubit** + %4 = load %Qubit*, %Qubit** %3, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %6 = bitcast i8* %5 to %Qubit** + store %Qubit* %4, %Qubit** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %angle, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____ctl(%Array* %__controlQubits__, { double, i64, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 0 + %angle = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2 + %register = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %5 = bitcast i8* %4 to i2* + store i2 0, i2* %5, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %10 = bitcast i8* %9 to %Qubit** + store %Qubit* %8, %Qubit** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Array*, double, %Array* }* + %13 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* 
}* %12, i32 0, i32 1 + %15 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 2 + store %Array* %paulis, %Array** %13, align 8 + store double %angle, double* %14, align 8 + store %Array* %qubits, %Array** %15, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %12) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____ctladj(%Array* %__controlQubits__, { double, i64, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 0 + %angle = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2 + %register = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %5 = bitcast i8* %4 to i2* + store i2 0, i2* %5, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %10 = bitcast i8* %9 to %Qubit** + store %Qubit* %8, %Qubit** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Array*, double, %Array* }* + %13 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 
0, i32 2 + store %Array* %paulis, %Array** %13, align 8 + store double %angle, double* %14, align 8 + store %Array* %qubits, %Array** %15, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %12) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____body(double %tolerance, %Array* %disentangling, i2 %axis, { %Range, i64 }* %0, %Array* %register) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %1 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 0 + %rngControl = load %Range, %Range* %1, align 4 + %2 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %2, align 4 + %3 = extractvalue %Range %rngControl, 0 + %4 = extractvalue %Range %rngControl, 1 + %5 = extractvalue %Range %rngControl, 2 + %6 = insertvalue %Range zeroinitializer, i64 %3, 0 + %7 = insertvalue %Range %6, i64 %4, 1 + %8 = insertvalue %Range %7, i64 %5, 2 + %9 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %8, i1 true) + %actualControl = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %9) + %10 = getelementptr inbounds { %Array* }, { %Array* }* %actualControl, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { %Array* }* %actualControl to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %14 = bitcast i8* %13 to %Qubit** + %15 = load %Qubit*, %Qubit** %14, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body(double %tolerance, %Array* %disentangling, i2 %axis, { %Array* }* %actualControl, %Qubit* %15) + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____adj(double %tolerance, %Array* %disentangling, i2 %axis, { %Range, i64 }* %0, 
%Array* %register) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %1 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 0 + %rngControl = load %Range, %Range* %1, align 4 + %2 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %2, align 4 + %3 = extractvalue %Range %rngControl, 0 + %4 = extractvalue %Range %rngControl, 1 + %5 = extractvalue %Range %rngControl, 2 + %6 = insertvalue %Range zeroinitializer, i64 %3, 0 + %7 = insertvalue %Range %6, i64 %4, 1 + %8 = insertvalue %Range %7, i64 %5, 2 + %9 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %8, i1 true) + %__qsVar0__actualControl__ = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %9) + %10 = getelementptr inbounds { %Array* }, { %Array* }* %__qsVar0__actualControl__, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { %Array* }* %__qsVar0__actualControl__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %14 = bitcast i8* %13 to %Qubit** + %15 = load %Qubit*, %Qubit** %14, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj(double %tolerance, %Array* %disentangling, i2 %axis, { %Array* }* %__qsVar0__actualControl__, %Qubit* %15) + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____ctl(%Array* %__controlQubits__, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1 + %disentangling = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1) + %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2 + %axis = load i2, i2* %3, align 1 + %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3 + %5 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8 + %6 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4 + %register = load %Array*, %Array** %6, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %7 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 0 + %rngControl = load %Range, %Range* %7, align 4 + %8 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 1 + %idxTarget = load i64, i64* %8, align 4 + %9 = extractvalue %Range %rngControl, 0 + %10 = extractvalue %Range %rngControl, 1 + %11 = extractvalue %Range %rngControl, 2 + %12 = insertvalue %Range zeroinitializer, i64 %9, 0 + %13 = insertvalue %Range %12, i64 %10, 1 + %14 = insertvalue %Range %13, i64 %11, 2 + %15 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %14, i1 true) + %actualControl = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %15) + %16 = getelementptr inbounds { %Array* }, { %Array* }* %actualControl, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array* }* %actualControl to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 1) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %20 = bitcast i8* %19 to %Qubit** + %21 = load %Qubit*, %Qubit** %20, align 8 + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %23 = bitcast %Tuple* %22 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %24 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 0 + %25 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 1 + %26 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 2 + %27 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 3 + %28 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 4 + store double %tolerance, double* %24, align 8 + store %Array* %disentangling, %Array** %25, align 8 + store i2 %axis, i2* %26, align 1 + store { %Array* }* %actualControl, { %Array* }** %27, align 8 + store %Qubit* %21, %Qubit** %28, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____ctladj(%Array* %__controlQubits__, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1 + %disentangling = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1) + %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2 + %axis = load i2, i2* %3, align 1 + %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3 + %5 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8 + %6 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4 + %register = load %Array*, %Array** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %7 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 0 + %rngControl = load %Range, %Range* %7, align 4 + %8 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 1 + %idxTarget = load i64, i64* %8, align 4 + %9 = extractvalue %Range %rngControl, 0 + %10 = extractvalue %Range %rngControl, 1 + %11 = extractvalue %Range %rngControl, 2 + %12 = insertvalue %Range zeroinitializer, i64 %9, 0 + %13 = insertvalue %Range %12, i64 %10, 1 + %14 = insertvalue %Range %13, i64 %11, 2 + %15 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %14, i1 true) + %__qsVar0__actualControl__ = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %15) + %16 = getelementptr inbounds { %Array* }, { %Array* }* %__qsVar0__actualControl__, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array* }* %__qsVar0__actualControl__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 1) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %20 = bitcast i8* %19 to %Qubit** + %21 = load %Qubit*, %Qubit** %20, align 8 + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, 
%Qubit* }* null, i32 1) to i64)) + %23 = bitcast %Tuple* %22 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %24 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 0 + %25 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 1 + %26 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 2 + %27 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 3 + %28 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 4 + store double %tolerance, double* %24, align 8 + store %Array* %disentangling, %Array** %25, align 8 + store i2 %axis, i2* %26, align 1 + store { %Array* }* %__qsVar0__actualControl__, { %Array* }** %27, align 8 + store %Qubit* %21, %Qubit** %28, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____body(%Callable* %bareOp, { %Array* }* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %register to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array* }* + %5 = getelementptr inbounds { %Array* }, { %Array* }* %4, i32 0, i32 0 + store %Array* %1, %Array** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %bareOp, %Tuple* %3, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + 
call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____adj(%Callable* %bareOp, { %Array* }* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %register to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Callable* @__quantum__rt__callable_copy(%Callable* %bareOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %3) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + store %Array* %1, %Array** %6, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %3, %Tuple* %4, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____ctl(%Array* %__controlQubits__, { %Callable*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0 + %bareOp = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1) + %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1 + %register = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %register to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %bareOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { 
%Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %9, align 8 + store %Array* %4, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____ctladj(%Array* %__controlQubits__, { %Callable*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0 + %bareOp = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1) + %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1 + %register = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %register to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %bareOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %9, align 8 + store %Array* %4, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Preparation____QsRef0__ApproximatelyUnprepareArbitraryStatePlan____body(double %tolerance, %Array* %coefficients, { %Range, i64 }* %0) { +entry: + %plan = alloca %Array*, align 8 + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %3) + %6 = bitcast i8* %5 to { double, double }** + %7 = load { double, double }*, { double, double }** %6, align 8 + %8 = bitcast { double, double }* %7 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %10 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 0 + %rngControl = load %Range, %Range* %10, align 4 + %11 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %11, align 4 + %12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + store %Array* %12, %Array** %plan, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = call { %Array*, %Array*, %Array* }* @Microsoft__Quantum__Preparation____QsRef0__StatePreparationSBMComputeCoefficients____body(%Array* %coefficients) + %14 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %13, i32 0, i32 0 + %disentanglingY = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingY, i32 1) + %15 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %13, i32 0, i32 1 + %disentanglingZ = load %Array*, %Array** %15, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingZ, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %13, i32 0, i32 2 + %newCoefficients = load %Array*, %Array** %16, align 8 + %17 = call i64 @__quantum__rt__array_get_size_1d(%Array* %newCoefficients) + %18 = sub i64 %17, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %19 = phi i64 [ 0, %exit__1 ], [ %25, %exiting__2 ] + %20 = icmp sle i64 %19, %18 + br i1 %20, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 %19) + %22 = bitcast i8* %21 to { double, double }** + %23 = load { double, double }*, { double, double }** %22, align 8 + %24 = bitcast { 
double, double }* %23 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %25 = add i64 %19, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %newCoefficients, i32 1) + %26 = call i1 @Microsoft__Quantum__Canon____QsRef0__AnyOutsideToleranceD____body(double %tolerance, %Array* %disentanglingZ) + br i1 %26, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__2 + %27 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingZ, i32 1) + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, %Range, i64 }* getelementptr ({ %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { %Callable*, double, %Array*, i2, %Range, i64 }* + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 1 + %32 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 2 + %33 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 3 + %34 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 4 + %35 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 5 + store %Callable* %27, %Callable** %30, align 8 + store double %tolerance, double* %31, align 8 + store %Array* %disentanglingZ, %Array** %32, align 8 + store i2 -2, i2* %33, align 1 + store %Range %rngControl, %Range* %34, align 4 + store i64 %idxTarget, i64* %35, align 4 + %36 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__28__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__17__FunctionTable, %Tuple* %28) + %37 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 0) + %39 = bitcast i8* %38 to %Callable** + store %Callable* %36, %Callable** %39, align 8 + %40 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %40, i64 0) + %42 = bitcast i8* %41 to %Callable** + store %Callable* %36, %Callable** %42, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 1) + br label %header__3 + +continue__1: ; preds = %exit__4, %exit__2 + %43 = call i1 @Microsoft__Quantum__Canon____QsRef0__AnyOutsideToleranceD____body(double %tolerance, %Array* %disentanglingY) + br i1 %43, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + %44 = load %Array*, %Array** %plan, align 8 + %45 = call %Callable* 
@__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingY, i32 1) + %46 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, %Range, i64 }* getelementptr ({ %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* null, i32 1) to i64)) + %47 = bitcast %Tuple* %46 to { %Callable*, double, %Array*, i2, %Range, i64 }* + %48 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 0 + %49 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 1 + %50 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 2 + %51 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 3 + %52 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 4 + %53 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 5 + store %Callable* %45, %Callable** %48, align 8 + store double %tolerance, double* %49, align 8 + store %Array* %disentanglingY, %Array** %50, align 8 + store i2 -1, i2* %51, align 1 + store %Range %rngControl, %Range* %52, align 4 + store i64 %idxTarget, i64* %53, align 4 + %54 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__29__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__17__FunctionTable, %Tuple* %46) + %55 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 0) + %57 = bitcast i8* %56 to %Callable** + store %Callable* %54, %Callable** %57, align 8 + %58 = call %Array* @__quantum__rt__array_concatenate(%Array* %44, %Array* %55) + %59 = call i64 @__quantum__rt__array_get_size_1d(%Array* %58) + %60 = sub i64 %59, 1 + br label %header__5 + +continue__2: ; preds = %exit__9, %continue__1 + %61 = call i1 @Microsoft__Quantum__Canon__IsRangeEmpty__body(%Range %rngControl) + br i1 %61, label %then0__3, label %test1__1 + +then0__3: ; preds = %continue__2 + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 0) + %63 = bitcast i8* %62 to { double, double }** + %64 = load { double, double }*, { double, double }** %63, align 8 + %65 = getelementptr inbounds { double, double }, { double, double }* %64, i32 0, i32 0 + %abs = load double, double* %65, align 8 + %66 = getelementptr inbounds { double, double }, { double, double }* %64, i32 0, i32 1 + %arg = load double, double* %66, align 8 + %67 = call double @Microsoft__Quantum__Math__AbsD__body(double %arg) + %68 = fcmp ogt double %67, %tolerance + br i1 %68, label %then0__4, label %continue__4 + +then0__4: ; preds = %then0__3 + %69 = load %Array*, %Array** %plan, align 8 + %70 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____FunctionTable, [2 x void 
(%Tuple*, i32)*]* null, %Tuple* null) + %71 = fmul double -1.000000e+00, %arg + %72 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, i64 }* getelementptr ({ %Callable*, double, i64 }, { %Callable*, double, i64 }* null, i32 1) to i64)) + %73 = bitcast %Tuple* %72 to { %Callable*, double, i64 }* + %74 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %73, i32 0, i32 0 + %75 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %73, i32 0, i32 1 + %76 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %73, i32 0, i32 2 + store %Callable* %70, %Callable** %74, align 8 + store double %71, double* %75, align 8 + store i64 %idxTarget, i64* %76, align 4 + %77 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__30__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__18__FunctionTable, %Tuple* %72) + %78 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 0) + %80 = bitcast i8* %79 to %Callable** + store %Callable* %77, %Callable** %80, align 8 + %81 = call %Array* @__quantum__rt__array_concatenate(%Array* %69, %Array* %78) + %82 = call i64 @__quantum__rt__array_get_size_1d(%Array* %81) + %83 = sub i64 %82, 1 + br label %header__10 + +continue__4: ; preds = %exit__14, %then0__3 + br label %continue__3 + +test1__1: ; preds = %continue__2 + %84 = call i1 @Microsoft__Quantum__Canon____QsRef0__AnyOutsideToleranceCP____body(double %tolerance, %Array* %newCoefficients) + br i1 %84, label %then1__1, label %continue__3 + +then1__1: ; preds = %test1__1 + %85 = extractvalue %Range %rngControl, 0 + %86 = extractvalue %Range %rngControl, 1 + %87 = extractvalue %Range %rngControl, 2 + %88 = add i64 %85, 1 + %89 = extractvalue %Range %rngControl, 0 + %90 = extractvalue %Range %rngControl, 1 + %91 = extractvalue %Range %rngControl, 2 + %92 = extractvalue %Range %rngControl, 0 + %93 = extractvalue %Range %rngControl, 1 + %94 = extractvalue %Range %rngControl, 2 + %95 = insertvalue %Range zeroinitializer, i64 %88, 0 + %96 = insertvalue %Range %95, i64 %90, 1 + %newControl = insertvalue %Range %96, i64 %94, 2 + %newTarget = extractvalue %Range %rngControl, 0 + %97 = extractvalue %Range %rngControl, 1 + %98 = extractvalue %Range %rngControl, 2 + %99 = load %Array*, %Array** %plan, align 8 + %100 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %101 = bitcast %Tuple* %100 to { %Range, i64 }* + %102 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %101, i32 0, i32 0 + %103 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %101, i32 0, i32 1 + store %Range %newControl, %Range* %102, align 4 + store i64 %newTarget, i64* %103, align 4 + %104 = call %Array* @Microsoft__Quantum__Preparation____QsRef0__ApproximatelyUnprepareArbitraryStatePlan____body(double %tolerance, %Array* %newCoefficients, { %Range, i64 }* %101) + %105 = call %Array* @__quantum__rt__array_concatenate(%Array* %99, %Array* %104) + %106 = call i64 @__quantum__rt__array_get_size_1d(%Array* %105) + %107 = sub i64 %106, 1 + br label %header__15 + +continue__3: ; preds = %exit__19, %test1__1, %continue__4 + %108 = load %Array*, %Array** %plan, align 8 + %109 = sub i64 %1, 1 + br label %header__20 + +header__3: ; preds = %exiting__3, %then0__1 + %110 = phi i64 [ 0, 
%then0__1 ], [ %115, %exiting__3 ] + %111 = icmp sle i64 %110, 0 + br i1 %111, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %40, i64 %110) + %113 = bitcast i8* %112 to %Callable** + %114 = load %Callable*, %Callable** %113, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %114, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %114, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %115 = add i64 %110, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %40, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + store %Array* %40, %Array** %plan, align 8 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %116 = phi i64 [ 0, %exit__3 ], [ %121, %exiting__4 ] + %117 = icmp sle i64 %116, 0 + br i1 %117, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 %116) + %119 = bitcast i8* %118 to %Callable** + %120 = load %Callable*, %Callable** %119, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %120, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %120, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %121 = add i64 %116, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 -1) + br label %continue__1 + +header__5: ; preds = %exiting__5, %then0__2 + %122 = phi i64 [ 0, %then0__2 ], [ %127, %exiting__5 ] + %123 = icmp sle i64 %122, %60 + br i1 %123, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %124 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %58, i64 %122) + %125 = bitcast i8* %124 to %Callable** + %126 = load %Callable*, %Callable** %125, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %126, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %126, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %127 = add i64 %122, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %58, i32 1) + %128 = sub i64 %59, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %129 = phi i64 [ 0, %exit__5 ], [ %134, %exiting__6 ] + %130 = icmp sle i64 %129, %128 + br i1 %130, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %58, i64 %129) + %132 = bitcast i8* %131 to %Callable** + %133 = load %Callable*, %Callable** %132, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %133, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %133, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %134 = add i64 %129, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 1) + %135 = call i64 @__quantum__rt__array_get_size_1d(%Array* %44) + %136 = sub i64 %135, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %137 = phi i64 [ 0, %exit__6 ], [ %142, %exiting__7 ] + %138 = icmp sle i64 %137, %136 + br i1 %138, label %body__7, label 
%exit__7 + +body__7: ; preds = %header__7 + %139 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 %137) + %140 = bitcast i8* %139 to %Callable** + %141 = load %Callable*, %Callable** %140, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %141, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %141, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %142 = add i64 %137, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 -1) + %143 = sub i64 %135, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %144 = phi i64 [ 0, %exit__7 ], [ %149, %exiting__8 ] + %145 = icmp sle i64 %144, %143 + br i1 %145, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 %144) + %147 = bitcast i8* %146 to %Callable** + %148 = load %Callable*, %Callable** %147, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %148, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %148, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %149 = add i64 %144, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 -1) + store %Array* %58, %Array** %plan, align 8 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %150 = phi i64 [ 0, %exit__8 ], [ %155, %exiting__9 ] + %151 = icmp sle i64 %150, 0 + br i1 %151, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 %150) + %153 = bitcast i8* %152 to %Callable** + %154 = load %Callable*, %Callable** %153, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %154, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %154, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %155 = add i64 %150, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_reference_count(%Array* %55, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %58, i32 -1) + br label %continue__2 + +header__10: ; preds = %exiting__10, %then0__4 + %156 = phi i64 [ 0, %then0__4 ], [ %161, %exiting__10 ] + %157 = icmp sle i64 %156, %83 + br i1 %157, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %158 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %81, i64 %156) + %159 = bitcast i8* %158 to %Callable** + %160 = load %Callable*, %Callable** %159, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %160, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %160, i32 1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %161 = add i64 %156, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 1) + %162 = sub i64 %82, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %163 = phi i64 [ 0, %exit__10 ], [ %168, %exiting__11 ] + %164 = icmp sle i64 %163, %162 + br i1 %164, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %165 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %81, i64 %163) + %166 = bitcast i8* %165 to %Callable** + %167 = load %Callable*, 
%Callable** %166, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %167, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %167, i32 1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %168 = add i64 %163, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 1) + %169 = call i64 @__quantum__rt__array_get_size_1d(%Array* %69) + %170 = sub i64 %169, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %171 = phi i64 [ 0, %exit__11 ], [ %176, %exiting__12 ] + %172 = icmp sle i64 %171, %170 + br i1 %172, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %173 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 %171) + %174 = bitcast i8* %173 to %Callable** + %175 = load %Callable*, %Callable** %174, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %175, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %175, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %176 = add i64 %171, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %69, i32 -1) + %177 = sub i64 %169, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %178 = phi i64 [ 0, %exit__12 ], [ %183, %exiting__13 ] + %179 = icmp sle i64 %178, %177 + br i1 %179, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %180 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 %178) + %181 = bitcast i8* %180 to %Callable** + %182 = load %Callable*, %Callable** %181, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %182, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %182, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %183 = add i64 %178, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 -1) + store %Array* %81, %Array** %plan, align 8 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %184 = phi i64 [ 0, %exit__13 ], [ %189, %exiting__14 ] + %185 = icmp sle i64 %184, 0 + br i1 %185, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %186 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 %184) + %187 = bitcast i8* %186 to %Callable** + %188 = load %Callable*, %Callable** %187, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %188, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %188, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %189 = add i64 %184, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_reference_count(%Array* %78, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 -1) + br label %continue__4 + +header__15: ; preds = %exiting__15, %then1__1 + %190 = phi i64 [ 0, %then1__1 ], [ %195, %exiting__15 ] + %191 = icmp sle i64 %190, %107 + br i1 %191, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %192 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 %190) + %193 = bitcast i8* %192 to %Callable** + %194 = load %Callable*, %Callable** %193, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %194, 
i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %194, i32 1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %195 = add i64 %190, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_reference_count(%Array* %105, i32 1) + %196 = sub i64 %106, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %197 = phi i64 [ 0, %exit__15 ], [ %202, %exiting__16 ] + %198 = icmp sle i64 %197, %196 + br i1 %198, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %199 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 %197) + %200 = bitcast i8* %199 to %Callable** + %201 = load %Callable*, %Callable** %200, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %201, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %201, i32 1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %202 = add i64 %197, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_alias_count(%Array* %105, i32 1) + %203 = call i64 @__quantum__rt__array_get_size_1d(%Array* %99) + %204 = sub i64 %203, 1 + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %205 = phi i64 [ 0, %exit__16 ], [ %210, %exiting__17 ] + %206 = icmp sle i64 %205, %204 + br i1 %206, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %207 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %99, i64 %205) + %208 = bitcast i8* %207 to %Callable** + %209 = load %Callable*, %Callable** %208, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %209, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %209, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %210 = add i64 %205, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %99, i32 -1) + %211 = sub i64 %203, 1 + br label %header__18 + +header__18: ; preds = %exiting__18, %exit__17 + %212 = phi i64 [ 0, %exit__17 ], [ %217, %exiting__18 ] + %213 = icmp sle i64 %212, %211 + br i1 %213, label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %99, i64 %212) + %215 = bitcast i8* %214 to %Callable** + %216 = load %Callable*, %Callable** %215, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %216, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %216, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = %body__18 + %217 = add i64 %212, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_reference_count(%Array* %99, i32 -1) + store %Array* %105, %Array** %plan, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %100, i32 -1) + %218 = call i64 @__quantum__rt__array_get_size_1d(%Array* %104) + %219 = sub i64 %218, 1 + br label %header__19 + +header__19: ; preds = %exiting__19, %exit__18 + %220 = phi i64 [ 0, %exit__18 ], [ %225, %exiting__19 ] + %221 = icmp sle i64 %220, %219 + br i1 %221, label %body__19, label %exit__19 + +body__19: ; preds = %header__19 + %222 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %104, i64 %220) + %223 = bitcast i8* %222 to %Callable** + %224 = load %Callable*, %Callable** %223, align 8 + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %224, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %224, i32 -1) + br label %exiting__19 + +exiting__19: ; preds = %body__19 + %225 = add i64 %220, 1 + br label %header__19 + +exit__19: ; preds = %header__19 + call void @__quantum__rt__array_update_reference_count(%Array* %104, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %105, i32 -1) + br label %continue__3 + +header__20: ; preds = %exiting__20, %continue__3 + %226 = phi i64 [ 0, %continue__3 ], [ %232, %exiting__20 ] + %227 = icmp sle i64 %226, %109 + br i1 %227, label %body__20, label %exit__20 + +body__20: ; preds = %header__20 + %228 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %226) + %229 = bitcast i8* %228 to { double, double }** + %230 = load { double, double }*, { double, double }** %229, align 8 + %231 = bitcast { double, double }* %230 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %231, i32 -1) + br label %exiting__20 + +exiting__20: ; preds = %body__20 + %232 = add i64 %226, 1 + br label %header__20 + +exit__20: ; preds = %header__20 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + %233 = call i64 @__quantum__rt__array_get_size_1d(%Array* %108) + %234 = sub i64 %233, 1 + br label %header__21 + +header__21: ; preds = %exiting__21, %exit__20 + %235 = phi i64 [ 0, %exit__20 ], [ %240, %exiting__21 ] + %236 = icmp sle i64 %235, %234 + br i1 %236, label %body__21, label %exit__21 + +body__21: ; preds = %header__21 + %237 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %108, i64 %235) + %238 = bitcast i8* %237 to %Callable** + %239 = load %Callable*, %Callable** %238, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %239, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %239, i32 -1) + br label %exiting__21 + +exiting__21: ; preds = %body__21 + %240 = add i64 %235, 1 + br label %header__21 + +exit__21: ; preds = %header__21 + call void @__quantum__rt__array_update_alias_count(%Array* %108, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingY, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingZ, i32 -1) + %241 = sub i64 %17, 1 + br label %header__22 + +header__22: ; preds = %exiting__22, %exit__21 + %242 = phi i64 [ 0, %exit__21 ], [ %248, %exiting__22 ] + %243 = icmp sle i64 %242, %241 + br i1 %243, label %body__22, label %exit__22 + +body__22: ; preds = %header__22 + %244 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 %242) + %245 = bitcast i8* %244 to { double, double }** + %246 = load { double, double }*, { double, double }** %245, align 8 + %247 = bitcast { double, double }* %246 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %247, i32 -1) + br label %exiting__22 + +exiting__22: ; preds = %body__22 + %248 = add i64 %242, 1 + br label %header__22 + +exit__22: ; preds = %header__22 + call void @__quantum__rt__array_update_alias_count(%Array* %newCoefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingY, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingZ, i32 -1) + %249 = sub i64 %17, 1 + br label %header__23 + +header__23: ; preds = %exiting__23, %exit__22 + %250 = phi i64 [ 0, %exit__22 ], [ %256, %exiting__23 ] + %251 = icmp sle i64 %250, %249 + br 
i1 %251, label %body__23, label %exit__23 + +body__23: ; preds = %header__23 + %252 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 %250) + %253 = bitcast i8* %252 to { double, double }** + %254 = load { double, double }*, { double, double }** %253, align 8 + %255 = bitcast { double, double }* %254 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %255, i32 -1) + br label %exiting__23 + +exiting__23: ; preds = %body__23 + %256 = add i64 %250, 1 + br label %header__23 + +exit__23: ; preds = %header__23 + call void @__quantum__rt__array_update_reference_count(%Array* %newCoefficients, i32 -1) + %257 = bitcast { %Array*, %Array*, %Array* }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %257, i32 -1) + ret %Array* %108 +} + +define internal { %Array*, %Array*, %Array* }* @Microsoft__Quantum__Preparation____QsRef0__StatePreparationSBMComputeCoefficients____body(%Array* %coefficients) { +entry: + %newCoefficients = alloca %Array*, align 8 + %disentanglingY = alloca %Array*, align 8 + %disentanglingZ = alloca %Array*, align 8 + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = sdiv i64 %0, 2 + %10 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %9) + %11 = sub i64 %9, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %16, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %12) + %15 = bitcast i8* %14 to double* + store double 0.000000e+00, double* %15, align 8 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %16 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + store %Array* %10, %Array** %disentanglingZ, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %17 = sdiv i64 %0, 2 + %18 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %17) + %19 = sub i64 %17, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %20 = phi i64 [ 0, %exit__2 ], [ %24, %exiting__3 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %20) + %23 = bitcast i8* %22 to double* + store double 0.000000e+00, double* %23, align 8 + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %24 = add i64 %20, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %18, %Array** %disentanglingY, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + %25 = call { double, double }* 
@Microsoft__Quantum__Math__ComplexPolar__body(double 0.000000e+00, double 0.000000e+00) + %26 = sdiv i64 %0, 2 + %27 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %26) + %28 = sub i64 %26, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %29 = phi i64 [ 0, %exit__3 ], [ %34, %exiting__4 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %27, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + store { double, double }* %25, { double, double }** %32, align 8 + %33 = bitcast { double, double }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %34 = add i64 %29, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + store %Array* %27, %Array** %newCoefficients, align 8 + %35 = sub i64 %26, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %36 = phi i64 [ 0, %exit__4 ], [ %42, %exiting__5 ] + %37 = icmp sle i64 %36, %35 + br i1 %37, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %27, i64 %36) + %39 = bitcast i8* %38 to { double, double }** + %40 = load { double, double }*, { double, double }** %39, align 8 + %41 = bitcast { double, double }* %40 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %41, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %42 = add i64 %36, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 1) + %43 = sub i64 %0, 1 + br label %preheader__1 + +preheader__1: ; preds = %exit__5 + br label %header__6 + +header__6: ; preds = %exiting__6, %preheader__1 + %idxCoeff = phi i64 [ 0, %preheader__1 ], [ %80, %exiting__6 ] + %44 = icmp sle i64 %idxCoeff, %43 + %45 = icmp sge i64 %idxCoeff, %43 + %46 = select i1 true, i1 %44, i1 %45 + br i1 %46, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %idxCoeff) + %48 = bitcast i8* %47 to { double, double }** + %49 = load { double, double }*, { double, double }** %48, align 8 + %50 = add i64 %idxCoeff, 1 + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %50) + %52 = bitcast i8* %51 to { double, double }** + %53 = load { double, double }*, { double, double }** %52, align 8 + %54 = call { { double, double }*, double, double }* @Microsoft__Quantum__Preparation__BlochSphereCoordinates__body({ double, double }* %49, { double, double }* %53) + %55 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %54, i32 0, i32 0 + %rt = load { double, double }*, { double, double }** %55, align 8 + %56 = bitcast { double, double }* %rt to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 1) + %57 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %54, i32 0, i32 1 + %phi = load double, double* %57, align 8 + %58 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %54, i32 0, i32 2 + %theta = load double, double* %58, align 8 + %59 = load %Array*, %Array** %disentanglingZ, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %59, i32 -1) + 
%60 = call %Array* @__quantum__rt__array_copy(%Array* %59, i1 false) + %61 = fmul double 5.000000e-01, %phi + %62 = sdiv i64 %idxCoeff, 2 + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 %62) + %64 = bitcast i8* %63 to double* + store double %61, double* %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %60, i32 1) + store %Array* %60, %Array** %disentanglingZ, align 8 + %65 = load %Array*, %Array** %disentanglingY, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 -1) + %66 = call %Array* @__quantum__rt__array_copy(%Array* %65, i1 false) + %67 = fmul double 5.000000e-01, %theta + %68 = sdiv i64 %idxCoeff, 2 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %66, i64 %68) + %70 = bitcast i8* %69 to double* + %71 = load double, double* %70, align 8 + store double %67, double* %70, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 1) + store %Array* %66, %Array** %disentanglingY, align 8 + %72 = load %Array*, %Array** %newCoefficients, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 -1) + %73 = call %Array* @__quantum__rt__array_copy(%Array* %72, i1 false) + %74 = sdiv i64 %idxCoeff, 2 + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 %74) + %76 = bitcast i8* %75 to { double, double }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 1) + %77 = load { double, double }*, { double, double }** %76, align 8 + %78 = bitcast { double, double }* %77 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %78, i32 -1) + store { double, double }* %rt, { double, double }** %76, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %73, i32 1) + store %Array* %73, %Array** %newCoefficients, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 -1) + %79 = bitcast { { double, double }*, double, double }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %59, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %72, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %80 = add i64 %idxCoeff, 2 + br label %header__6 + +exit__6: ; preds = %header__6 + %81 = load %Array*, %Array** %disentanglingY, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 1) + %82 = load %Array*, %Array** %disentanglingZ, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %82, i32 1) + %83 = load %Array*, %Array** %newCoefficients, align 8 + %84 = call i64 @__quantum__rt__array_get_size_1d(%Array* %83) + %85 = sub i64 %84, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %86 = phi i64 [ 0, %exit__6 ], [ %92, %exiting__7 ] + %87 = icmp sle i64 %86, %85 + br i1 %87, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 %86) + %89 = bitcast i8* %88 to { double, double }** + %90 = load { double, double }*, { double, double }** %89, align 8 + %91 = bitcast { double, double }* %90 to 
%Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %91, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %92 = add i64 %86, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %83, i32 1) + %93 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Array* }* getelementptr ({ %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* null, i32 1) to i64)) + %94 = bitcast %Tuple* %93 to { %Array*, %Array*, %Array* }* + %95 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %94, i32 0, i32 0 + %96 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %94, i32 0, i32 1 + %97 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %94, i32 0, i32 2 + store %Array* %81, %Array** %95, align 8 + store %Array* %82, %Array** %96, align 8 + store %Array* %83, %Array** %97, align 8 + %98 = sub i64 %0, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %99 = phi i64 [ 0, %exit__7 ], [ %105, %exiting__8 ] + %100 = icmp sle i64 %99, %98 + br i1 %100, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %99) + %102 = bitcast i8* %101 to { double, double }** + %103 = load { double, double }*, { double, double }** %102, align 8 + %104 = bitcast { double, double }* %103 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %104, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %105 = add i64 %99, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %82, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 -1) + %106 = sub i64 %84, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %107 = phi i64 [ 0, %exit__8 ], [ %113, %exiting__9 ] + %108 = icmp sle i64 %107, %106 + br i1 %108, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 %107) + %110 = bitcast i8* %109 to { double, double }** + %111 = load { double, double }*, { double, double }** %110, align 8 + %112 = bitcast { double, double }* %111 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %112, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %113 = add i64 %107, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %83, i32 -1) + %114 = bitcast { double, double }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %114, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %82, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 -1) + %115 = sub i64 %84, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %116 = phi i64 [ 0, %exit__9 ], [ %122, %exiting__10 ] + %117 = icmp sle i64 %116, %115 + br i1 %117, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 %116) + %119 = bitcast i8* %118 to { double, double }** + %120 = load { double, double }*, { double, double }** %119, align 8 + %121 = bitcast { 
double, double }* %120 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %121, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %122 = add i64 %116, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_reference_count(%Array* %83, i32 -1) + ret { %Array*, %Array*, %Array* }* %94 +} + +define internal void @Lifted__PartialApplication__28__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %8 = load %Range, %Range* %7, align 4 + %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5 + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Range, i64 }* + %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1 + store %Range %8, %Range* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = bitcast %Tuple* %arg-tuple to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4 + store double %2, double* %20, align 8 + store %Array* %4, %Array** %21, align 8 + store i2 %6, i2* %22, align 1 + store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8 + store %Array* %17, %Array** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 
}, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__28__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %8 = load %Range, %Range* %7, align 4 + %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5 + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Range, i64 }* + %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1 + store %Range %8, %Range* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = bitcast %Tuple* %arg-tuple to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4 + store double %2, double* %20, align 8 + store %Array* %4, %Array** %21, align 8 + store i2 %6, i2* %22, align 1 + store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8 + store %Array* %17, %Array** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, 
%Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__28__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4 + %13 = load %Range, %Range* %12, align 4 + %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5 + %15 = load i64, i64* %14, align 4 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Range, i64 }* + %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1 + store %Range %13, %Range* %18, align 4 + store i64 %15, i64* %19, align 4 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr 
inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3 + %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4 + store double %7, double* %22, align 8 + store %Array* %9, %Array** %23, align 8 + store i2 %11, i2* %24, align 1 + store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8 + store %Array* %4, %Array** %26, align 8 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1 + store %Array* %3, %Array** %29, align 8 + store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0 + %32 = load %Callable*, %Callable** %31, align 8 + %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %33) + call void @__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__28__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, 
%Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4 + %13 = load %Range, %Range* %12, align 4 + %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5 + %15 = load i64, i64* %14, align 4 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Range, i64 }* + %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1 + store %Range %13, %Range* %18, align 4 + store i64 %15, i64* %19, align 4 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3 + %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4 + store double %7, double* %22, align 8 + store %Array* %9, %Array** %23, align 8 + store i2 %11, i2* %24, align 1 + store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8 + store %Array* %4, %Array** %26, align 8 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1 + store %Array* %3, %Array** %29, align 8 + store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0 + %32 = load %Callable*, %Callable** %31, align 8 + %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %33) + call void @__quantum__rt__callable_make_controlled(%Callable* %33) + call void 
@__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load %Array*, %Array** %2, align 8 + %8 = load i2, i2* %3, align 1 + %9 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8 + %10 = load %Array*, %Array** %5, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____body(double %6, %Array* %7, i2 %8, { %Range, i64 }* %9, %Array* %10) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load %Array*, %Array** %2, align 8 + %8 = load i2, i2* %3, align 1 + %9 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8 + %10 = load %Array*, %Array** %5, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____adj(double %6, %Array* %7, i2 %8, { %Range, i64 }* %9, %Array* %10) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, 
{ %Range, i64 }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, i2, { %Range, i64 }*, %Array* }*, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____ctl(%Array* %3, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, i2, { %Range, i64 }*, %Array* }*, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyMultiplexStep____ctladj(%Array* %3, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__17__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %6 = load %Range, %Range* %5, align 4 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__17__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, 
%Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %6 = load %Range, %Range* %5, align 4 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__29__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %8 = load %Range, %Range* %7, align 4 + %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5 + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Range, i64 }* + %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1 + store %Range %8, %Range* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = bitcast %Tuple* %arg-tuple to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4 + store double %2, double* %20, align 8 + store %Array* %4, %Array** %21, align 8 + store i2 %6, i2* %22, align 1 + store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8 + store %Array* %17, %Array** 
%24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__29__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %8 = load %Range, %Range* %7, align 4 + %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5 + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Range, i64 }* + %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1 + store %Range %8, %Range* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = bitcast %Tuple* %arg-tuple to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4 + store double %2, double* %20, align 8 + store %Array* %4, %Array** %21, align 8 + store i2 %6, i2* %22, align 1 + store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8 + store %Array* %17, 
%Array** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__29__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4 + %13 = load %Range, %Range* %12, align 4 + %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5 + %15 = load i64, i64* %14, align 4 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Range, i64 }* + %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1 + store %Range %13, %Range* %18, align 4 + store i64 %15, i64* %19, align 4 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { 
double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3 + %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4 + store double %7, double* %22, align 8 + store %Array* %9, %Array** %23, align 8 + store i2 %11, i2* %24, align 1 + store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8 + store %Array* %4, %Array** %26, align 8 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1 + store %Array* %3, %Array** %29, align 8 + store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0 + %32 = load %Callable*, %Callable** %31, align 8 + %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %33) + call void @__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__29__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3 + 
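+; (ctladj wrapper) unpack the remaining captured fields (i2, %Range, i64) and pack the %Range/i64 pair into a fresh { %Range, i64 } tuple for the inner call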
%11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4 + %13 = load %Range, %Range* %12, align 4 + %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5 + %15 = load i64, i64* %14, align 4 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Range, i64 }* + %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1 + store %Range %13, %Range* %18, align 4 + store i64 %15, i64* %19, align 4 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3 + %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4 + store double %7, double* %22, align 8 + store %Array* %9, %Array** %23, align 8 + store i2 %11, i2* %24, align 1 + store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8 + store %Array* %4, %Array** %26, align 8 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1 + store %Array* %3, %Array** %29, align 8 + store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0 + %32 = load %Callable*, %Callable** %31, align 8 + %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %33) + call void 
@__quantum__rt__callable_make_controlled(%Callable* %33) + call void @__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__30__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, i64, %Array* }* + %10 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 2 + store double %2, double* %10, align 8 + store i64 %4, i64* %11, align 4 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__30__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, i64, %Array* }* + %10 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 2 + store double %2, double* %10, align 8 + store i64 %4, i64* %11, align 4 + store %Array* %7, %Array** %12, align 8 + %13 = 
getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__30__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %6 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, i64, %Array* }* + %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 2 + store double %7, double* %12, align 8 + store i64 %9, i64* %13, align 4 + store %Array* %4, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, i64, %Array* }* }* getelementptr ({ %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { double, i64, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { double, i64, %Array* }* %11, { double, i64, %Array* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__30__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %6 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, i64, %Array* }* + %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 2 + store double %7, double* %12, align 8 + store i64 %9, i64* %13, align 4 + store %Array* %4, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, i64, %Array* }* }* getelementptr ({ %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { double, i64, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { double, i64, %Array* }* %11, { double, i64, %Array* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64, %Array* }* + %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, 
i32 0 + %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2 + %4 = load double, double* %1, align 8 + %5 = load i64, i64* %2, align 4 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____body(double %4, i64 %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64, %Array* }* + %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2 + %4 = load double, double* %1, align 8 + %5 = load i64, i64* %2, align 4 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____adj(double %4, i64 %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, i64, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, i64, %Array* }*, { double, i64, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____ctl(%Array* %3, { double, i64, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, i64, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, i64, %Array* }*, { double, i64, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyGlobalRotationStep____ctladj(%Array* %3, { double, i64, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__18__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__18__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: 
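+; alias-count twin of MemoryManagement__18__RefCount: applies %count-change to the captured %Callable* and to the capture tuple itself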
+ %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal { { double, double }*, double, double }* @Microsoft__Quantum__Preparation__BlochSphereCoordinates__body({ double, double }* %a0, { double, double }* %a1) { +entry: + %0 = bitcast { double, double }* %a0 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = bitcast { double, double }* %a1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %abs0 = call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %a0) + %abs1 = call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %a1) + %arg0 = call double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* %a0) + %arg1 = call double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* %a1) + %2 = fmul double %abs0, %abs0 + %3 = fmul double %abs1, %abs1 + %d = fadd double %2, %3 + %r = call double @__quantum__qis__sqrt__body(double %d) + %4 = fadd double %arg0, %arg1 + %t = fmul double 5.000000e-01, %4 + %phi = fsub double %arg1, %arg0 + %5 = call double @__quantum__qis__arctan2__body(double %abs1, double %abs0) + %theta = fmul double 2.000000e+00, %5 + %6 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %r, double %t) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }*, double, double }* getelementptr ({ { double, double }*, double, double }, { { double, double }*, double, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { { double, double }*, double, double }* + %9 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %8, i32 0, i32 2 + store { double, double }* %6, { double, double }** %9, align 8 + store double %phi, double* %10, align 8 + store double %theta, double* %11, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + ret { { double, double }*, double, double }* %8 +} + +define internal %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %nQubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 
to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = trunc i64 %nQubits to i32 + %10 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %9) + %11 = fptosi double %10 to i64 + %12 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double 0.000000e+00, double 0.000000e+00) + %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___5ac6d1808c4040b9aa3fa0e6ce75855c_Padded__body(i64 %11, { double, double }* %12, %Array* %coefficients) + %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded) + %14 = sub i64 %13, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 %15) + %18 = bitcast i8* %17 to { double, double }** + %19 = load { double, double }*, { double, double }** %18, align 8 + %20 = bitcast { double, double }* %19 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1) + %22 = icmp sgt i64 %nQubits, 1 + %23 = sub i64 %nQubits, 1 + %24 = insertvalue %Range { i64 1, i64 1, i64 0 }, i64 %23, 2 + %rngControl = select i1 %22, %Range %24, %Range { i64 1, i64 1, i64 0 } + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { %Range, i64 }* + %27 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %26, i32 0, i32 0 + %28 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %26, i32 0, i32 1 + store %Range %rngControl, %Range* %27, align 4 + store i64 0, i64* %28, align 4 + %plan = call %Array* @Microsoft__Quantum__Preparation____QsRef0__ApproximatelyUnprepareArbitraryStatePlan____body(double %tolerance, %Array* %coefficientsPadded, { %Range, i64 }* %26) + %29 = call i64 @__quantum__rt__array_get_size_1d(%Array* %plan) + %30 = sub i64 %29, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %31 = phi i64 [ 0, %exit__2 ], [ %36, %exiting__3 ] + %32 = icmp sle i64 %31, %30 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %plan, i64 %31) + %34 = bitcast i8* %33 to %Callable** + %35 = load %Callable*, %Callable** %34, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %35, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %35, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %36 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %plan, i32 1) + %unprepare = call %Callable* @Microsoft__Quantum__Canon___55c7b8d161af40c49ac844f8a0630208_BoundCA__body(%Array* %plan) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unprepare, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %unprepare, i32 1) + %37 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %38 = call %Callable* @__quantum__rt__callable_copy(%Callable* %unprepare, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %38, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %38) + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable* }, { %Callable*, %Callable* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Callable*, %Callable* }* + %41 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %40, i32 0, i32 0 + %42 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %40, i32 0, i32 1 + store %Callable* %37, %Callable** %41, align 8 + store %Callable* %38, %Callable** %42, align 8 + %43 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__31__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__19__FunctionTable, %Tuple* %39) + %44 = sub i64 %0, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %45 = phi i64 [ 0, %exit__3 ], [ %51, %exiting__4 ] + %46 = icmp sle i64 %45, %44 + br i1 %46, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %45) + %48 = bitcast i8* %47 to { double, double }** + %49 = load { double, double }*, { double, double }** %48, align 8 + %50 = bitcast { double, double }* %49 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %50, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %51 = add i64 %45, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + %52 = sub i64 %13, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %53 = phi i64 [ 0, %exit__4 ], [ %59, %exiting__5 ] + %54 = icmp sle i64 %53, %52 + br i1 %54, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 %53) + %56 = bitcast i8* %55 to { double, double }** + %57 = load { double, double }*, { double, double }** %56, align 8 + %58 = bitcast { double, double }* %57 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %58, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %59 = add i64 %53, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1) + %60 = sub i64 %29, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %61 = phi i64 [ 0, %exit__5 ], [ %66, %exiting__6 ] + %62 = icmp sle i64 %61, %60 + br i1 %62, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %plan, i64 %61) + %64 = bitcast i8* %63 to %Callable** + %65 = load %Callable*, %Callable** %64, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %65, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %65, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = 
%body__6 + %66 = add i64 %61, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %plan, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unprepare, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unprepare, i32 -1) + %67 = bitcast { double, double }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %67, i32 -1) + %68 = sub i64 %13, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %69 = phi i64 [ 0, %exit__6 ], [ %75, %exiting__7 ] + %70 = icmp sle i64 %69, %68 + br i1 %70, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %71 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 %69) + %72 = bitcast i8* %71 to { double, double }** + %73 = load { double, double }*, { double, double }** %72, align 8 + %74 = bitcast { double, double }* %73 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %74, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %75 = add i64 %69, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + %76 = sub i64 %29, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %77 = phi i64 [ 0, %exit__7 ], [ %82, %exiting__8 ] + %78 = icmp sle i64 %77, %76 + br i1 %78, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %plan, i64 %77) + %80 = bitcast i8* %79 to %Callable** + %81 = load %Callable*, %Callable** %80, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %81, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %81, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %82 = add i64 %77, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %plan, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %unprepare, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %unprepare, i32 -1) + ret %Callable* %43 +} + +define internal void @Lifted__PartialApplication__31__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Callable*, { %Array* }* }* + %5 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 1 + store %Callable* %2, %Callable** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + call void 
@__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__31__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Callable*, { %Array* }* }* + %5 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 1 + store %Callable* %2, %Callable** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Callable* @__quantum__rt__callable_copy(%Callable* %9, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %10) + call void @__quantum__rt__callable_invoke(%Callable* %10, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__31__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, { %Array* }* }* + %10 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store { %Array* }* %4, { %Array* }** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, { %Array* }* }* }* getelementptr ({ %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Callable*, { %Array* }* }* }* + %14 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* 
}, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Callable*, { %Array* }* }* %9, { %Callable*, { %Array* }* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__31__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, { %Array* }* }* + %10 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store { %Array* }* %4, { %Array* }** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, { %Array* }* }* }* getelementptr ({ %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Callable*, { %Array* }* }* }* + %14 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Callable*, { %Array* }* }* %9, { %Callable*, { %Array* }* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void 
@__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Callable*, %Callable** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____body(%Callable* %3, { %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Callable*, %Callable** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____adj(%Callable* %3, { %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, { %Array* }* }* }* + %1 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Callable*, { %Array* }* }*, { %Callable*, { %Array* }* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____ctl(%Array* %3, { %Callable*, { %Array* }* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, { %Array* }* }* }* + %1 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Callable*, { %Array* }* }*, { %Callable*, { %Array* }* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef0__ApplyToLittleEndian____ctladj(%Array* %3, { %Callable*, { %Array* }* }* %4) + ret void +} + +define internal void @MemoryManagement__19__RefCount(%Tuple* 
%capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__19__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__body(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = call i64 @__quantum__rt__array_get_size_1d(%Array* %10) + %13 = call %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %12) + call void @__quantum__rt__callable_invoke(%Callable* %13, %Tuple* %11, %Tuple* null) + %14 = sub i64 %0, 1 + br label %header__2 + 
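+; second loop releases the per-coefficient alias counts taken in header__1 before the operation returns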
+header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %15) + %18 = bitcast i8* %17 to { double, double }** + %19 = load { double, double }*, { double, double }** %18, align 8 + %20 = bitcast { double, double }* %19 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__adj(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = call i64 @__quantum__rt__array_get_size_1d(%Array* %10) + %13 = call %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %12) + %14 = call %Callable* @__quantum__rt__callable_copy(%Callable* %13, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %14) + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %11, %Tuple* null) + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %16) + %19 = bitcast i8* %18 to { double, double }** + %20 = load { double, double }*, { double, double }** %19, align 8 + %21 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1) + br label 
%exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %5) + %8 = bitcast i8* %7 to { double, double }** + %9 = load { double, double }*, { double, double }** %8, align 8 + %10 = bitcast { double, double }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %12 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %12, align 8 + %13 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %14 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + %15 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %14) + %17 = call %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %16) + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 1) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %20 = bitcast 
%Tuple* %19 to { %Array*, { %Array* }* }* + %21 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %20, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %21, align 8 + store { %Array* }* %qubits, { %Array* }** %22, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %19, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %23 = sub i64 %3, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %24 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %25 = icmp sle i64 %24, %23 + br i1 %25, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %24) + %27 = bitcast i8* %26 to { double, double }** + %28 = load { double, double }*, { double, double }** %27, align 8 + %29 = bitcast { double, double }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %24, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %5) + %8 = bitcast i8* %7 to { double, double }** + %9 = load { double, double }*, { double, double }** %8, align 8 + %10 = bitcast { double, double }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void 
@__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %12 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %12, align 8 + %13 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %14 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + %15 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %14) + %17 = call %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %16) + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 1) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { %Array*, { %Array* }* }* + %21 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %20, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %21, align 8 + store { %Array* }* %qubits, { %Array* }** %22, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %19, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %23 = sub i64 %3, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %24 = phi i64 [ 0, %exit__1 ], [ %30, %exiting__2 ] + %25 = icmp sle i64 %24, %23 + br i1 %25, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %24) + %27 = bitcast i8* %26 to { double, double }** + %28 = load { double, double }*, { double, double }** %27, align 8 + %29 = bitcast { double, double }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %24, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__body(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__ComplexPolar__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + store %Callable* %3, %Callable** %6, align 8 + store double 0.000000e+00, double* %7, align 8 + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__32__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__20__FunctionTable, %Tuple* %4) + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__AbsD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %10 = call %Callable* @Microsoft__Quantum__Canon___aa681116ffc3482eb00c223eb7ada15f_Compose__body(%Callable* %8, %Callable* %9) + %coefficientsAsComplexPolar = call %Array* @Microsoft__Quantum__Arrays___bce10a946d1b466781aeb2785d88e6e2_Mapped__body(%Callable* %10, %Array* %coefficients) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsAsComplexPolar) + %12 = sub i64 %11, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %13 = phi i64 [ 0, %entry ], [ %19, %exiting__1 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %13, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsAsComplexPolar, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__body(double %tolerance, %Array* %coefficientsAsComplexPolar, { %Array* }* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + %20 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, 
%exit__1 ], [ %27, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %21) + %24 = bitcast i8* %23 to { double, double }** + %25 = load { double, double }*, { double, double }** %24, align 8 + %26 = bitcast { double, double }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsAsComplexPolar, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + %28 = sub i64 %11, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsAsComplexPolar, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__32__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 1 + %5 = load double, double* %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, double }* + %8 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store double %5, double* %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Math__ComplexPolar__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* 
%arg-tuple to { double, double }* + %1 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load double, double* %2, align 8 + %5 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %3, double %4) + %6 = bitcast %Tuple* %result-tuple to { { double, double }* }* + %7 = getelementptr inbounds { { double, double }* }, { { double, double }* }* %6, i32 0, i32 0 + store { double, double }* %5, { double, double }** %7, align 8 + ret void +} + +define internal void @MemoryManagement__20__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__20__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Math__AbsD__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = call double @Microsoft__Quantum__Math__AbsD__body(double %2) + %4 = bitcast %Tuple* %result-tuple to { double }* + %5 = getelementptr inbounds { double }, { double }* %4, i32 0, i32 0 + store double %3, double* %5, align 8 + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__adj(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__ComplexPolar__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Callable*, double }, { 
%Callable*, double }* %5, i32 0, i32 1 + store %Callable* %3, %Callable** %6, align 8 + store double 0.000000e+00, double* %7, align 8 + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__33__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__20__FunctionTable, %Tuple* %4) + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__AbsD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %10 = call %Callable* @Microsoft__Quantum__Canon___aa681116ffc3482eb00c223eb7ada15f_Compose__body(%Callable* %8, %Callable* %9) + %__qsVar0__coefficientsAsComplexPolar__ = call %Array* @Microsoft__Quantum__Arrays___bce10a946d1b466781aeb2785d88e6e2_Mapped__body(%Callable* %10, %Array* %coefficients) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__) + %12 = sub i64 %11, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %13 = phi i64 [ 0, %entry ], [ %19, %exiting__1 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %13, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__adj(double %tolerance, %Array* %__qsVar0__coefficientsAsComplexPolar__, { %Array* }* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + %20 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %21) + %24 = bitcast i8* %23 to { double, double }** + %25 = load { double, double }*, { double, double }** %24, align 8 + %26 = bitcast { double, double }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + 
%28 = sub i64 %11, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__33__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 1 + %5 = load double, double* %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, double }* + %8 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store double %5, double* %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__ComplexPolar__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %8 = call 
%Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, double }* + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store double 0.000000e+00, double* %11, align 8 + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__34__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__20__FunctionTable, %Tuple* %8) + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__AbsD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %14 = call %Callable* @Microsoft__Quantum__Canon___aa681116ffc3482eb00c223eb7ada15f_Compose__body(%Callable* %12, %Callable* %13) + %coefficientsAsComplexPolar = call %Array* @Microsoft__Quantum__Arrays___bce10a946d1b466781aeb2785d88e6e2_Mapped__body(%Callable* %14, %Array* %coefficients) + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsAsComplexPolar) + %16 = sub i64 %15, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %17 = phi i64 [ 0, %entry ], [ %23, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %17) + %20 = bitcast i8* %19 to { double, double }** + %21 = load { double, double }*, { double, double }** %20, align 8 + %22 = bitcast { double, double }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %23 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsAsComplexPolar, i32 1) + %24 = sub i64 %15, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %25 = phi i64 [ 0, %exit__1 ], [ %31, %exiting__2 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %25) + %28 = bitcast i8* %27 to { double, double }** + %29 = load { double, double }*, { double, double }** %28, align 8 + %30 = bitcast { double, double }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %31 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsAsComplexPolar, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { double, %Array*, { %Array* }* }* + %34 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { double, %Array*, { %Array* 
}* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 2 + store double %tolerance, double* %34, align 8 + store %Array* %coefficientsAsComplexPolar, %Array** %35, align 8 + store { %Array* }* %qubits, { %Array* }** %36, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %33) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + %37 = sub i64 %15, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %38 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %38) + %41 = bitcast i8* %40 to { double, double }** + %42 = load { double, double }*, { double, double }** %41, align 8 + %43 = bitcast { double, double }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %38, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsAsComplexPolar, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + %45 = sub i64 %15, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %46 = phi i64 [ 0, %exit__3 ], [ %52, %exiting__4 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %46) + %49 = bitcast i8* %48 to { double, double }** + %50 = load { double, double }*, { double, double }** %49, align 8 + %51 = bitcast { double, double }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %51, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %52 = add i64 %46, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsAsComplexPolar, i32 -1) + %53 = sub i64 %15, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %54 = phi i64 [ 0, %exit__4 ], [ %60, %exiting__5 ] + %55 = icmp sle i64 %54, %53 + br i1 %55, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsAsComplexPolar, i64 %54) + %57 = bitcast i8* %56 to { double, double }** + %58 = load { double, double }*, { double, double }** %57, align 8 + %59 = bitcast { double, double }* %58 to %Tuple* + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %59, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %60 = add i64 %54, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsAsComplexPolar, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__34__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 1 + %5 = load double, double* %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, double }* + %8 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store double %5, double* %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %qubits = load { %Array* }*, { %Array* }** %3, align 8 + %4 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__ComplexPolar__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, double }* + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 0 + %11 = 
getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store double 0.000000e+00, double* %11, align 8 + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__35__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__20__FunctionTable, %Tuple* %8) + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__AbsD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %14 = call %Callable* @Microsoft__Quantum__Canon___aa681116ffc3482eb00c223eb7ada15f_Compose__body(%Callable* %12, %Callable* %13) + %__qsVar0__coefficientsAsComplexPolar__ = call %Array* @Microsoft__Quantum__Arrays___bce10a946d1b466781aeb2785d88e6e2_Mapped__body(%Callable* %14, %Array* %coefficients) + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__) + %16 = sub i64 %15, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %17 = phi i64 [ 0, %entry ], [ %23, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %17) + %20 = bitcast i8* %19 to { double, double }** + %21 = load { double, double }*, { double, double }** %20, align 8 + %22 = bitcast { double, double }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %23 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 1) + %24 = sub i64 %15, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %25 = phi i64 [ 0, %exit__1 ], [ %31, %exiting__2 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %25) + %28 = bitcast i8* %27 to { double, double }** + %29 = load { double, double }*, { double, double }** %28, align 8 + %30 = bitcast { double, double }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %31 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { double, %Array*, { %Array* }* }* + %34 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %33, i32 0, i32 2 + store double %tolerance, double* %34, align 8 + store %Array* 
%__qsVar0__coefficientsAsComplexPolar__, %Array** %35, align 8 + store { %Array* }* %qubits, { %Array* }** %36, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %33) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + %37 = sub i64 %15, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %38 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %38) + %41 = bitcast i8* %40 to { double, double }** + %42 = load { double, double }*, { double, double }** %41, align 8 + %43 = bitcast { double, double }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %38, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + %45 = sub i64 %15, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %46 = phi i64 [ 0, %exit__3 ], [ %52, %exiting__4 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %46) + %49 = bitcast i8* %48 to { double, double }** + %50 = load { double, double }*, { double, double }** %49, align 8 + %51 = bitcast { double, double }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %51, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %52 = add i64 %46, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + %53 = sub i64 %15, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %54 = phi i64 [ 0, %exit__4 ], [ %60, %exiting__5 ] + %55 = icmp sle i64 %54, %53 + br i1 %55, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsAsComplexPolar__, i64 %54) + %57 = bitcast i8* %56 to { double, double }** + %58 = load { double, double }*, { double, double }** %57, align 8 + %59 = bitcast { double, double }* %58 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %59, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %60 = add i64 %54, 1 + br label %header__5 + +exit__5: ; preds = 
%header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsAsComplexPolar__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__35__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double }* + %1 = getelementptr inbounds { double }, { double }* %0, i32 0, i32 0 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 1 + %5 = load double, double* %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, double }* + %8 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store double %5, double* %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__body(%Array* %coefficients, { %Array* }* %qubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__body(double 0.000000e+00, %Array* %coefficients, { %Array* }* %qubits) + %12 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double 
}*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__adj(%Array* %coefficients, { %Array* }* %qubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__adj(double 0.000000e+00, %Array* %coefficients, { %Array* }* %qubits) + %12 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__ctl(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %coefficients = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry 
], [ %10, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %4) + %7 = bitcast i8* %6 to { double, double }** + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %11 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %qubits = load { %Array* }*, { %Array* }** %11, align 8 + %12 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %16) + %19 = bitcast i8* %18 to { double, double }** + %20 = load { double, double }*, { double, double }** %19, align 8 + %21 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { double, %Array*, { %Array* }* }* + %25 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 1 + %27 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 2 + store double 0.000000e+00, double* %25, align 8 + store %Array* %coefficients, %Array** %26, align 8 + store { %Array* }* %qubits, { %Array* }** %27, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %24) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %28 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, 
double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + %36 = sub i64 %2, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %37 = phi i64 [ 0, %exit__3 ], [ %43, %exiting__4 ] + %38 = icmp sle i64 %37, %36 + br i1 %38, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %37) + %40 = bitcast i8* %39 to { double, double }** + %41 = load { double, double }*, { double, double }** %40, align 8 + %42 = bitcast { double, double }* %41 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %43 = add i64 %37, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__ctladj(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %coefficients = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %4) + %7 = bitcast i8* %6 to { double, double }** + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %11 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %qubits = load { %Array* }*, { %Array* }** %11, align 8 + %12 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: 
; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %16) + %19 = bitcast i8* %18 to { double, double }** + %20 = load { double, double }*, { double, double }** %19, align 8 + %21 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { double, %Array*, { %Array* }* }* + %25 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 1 + %27 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %24, i32 0, i32 2 + store double 0.000000e+00, double* %25, align 8 + store %Array* %coefficients, %Array** %26, align 8 + store { %Array* }* %qubits, { %Array* }** %27, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateCP__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %24) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %28 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + %36 = sub i64 %2, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %37 = phi i64 [ 0, %exit__3 ], [ %43, %exiting__4 ] + %38 = icmp sle i64 %37, %36 + br i1 %38, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %37) + %40 = bitcast i8* %39 to { double, double }** + %41 = load { double, double }*, { double, double }** %40, align 8 + %42 = bitcast { double, double }* %41 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %43 = add i64 %37, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__body(%Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__body(double 0.000000e+00, %Array* %coefficients, { %Array* }* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__adj(%Array* %coefficients, { %Array* }* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__adj(double 0.000000e+00, %Array* %coefficients, { %Array* }* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__ctl(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %coefficients = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %qubits = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array*, { %Array* 
}* }* + %8 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 1 + %10 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 2 + store double 0.000000e+00, double* %8, align 8 + store %Array* %coefficients, %Array** %9, align 8 + store { %Array* }* %qubits, { %Array* }** %10, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %7) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__ctladj(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %coefficients = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %qubits = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %qubits to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 1) + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array*, { %Array* }* }* + %8 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 1 + %10 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %7, i32 0, i32 2 + store double 0.000000e+00, double* %8, align 8 + store %Array* %coefficients, %Array** %9, align 8 + store { %Array* }* %qubits, { %Array* }** %10, align 8 + call void @Microsoft__Quantum__Preparation__ApproximatelyPrepareArbitraryStateD__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %7) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %control, %Qubit** %5, align 8 + %__controlQubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %__controlQubits__, %Array* %3) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__1, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* + %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 + store %Qubit* %control, %Qubit** %5, align 8 + store %Qubit* %target, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Exp__body(%Array* %paulis, double %theta, %Array* %qubits) { +entry: + call void 
@__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Exp__adj(%Array* %paulis, double %theta, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 0 + %paulis = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Array*, double, %Array* }* + %6 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 2 + store %Array* %paulis, %Array** %6, align 8 + store double %theta, double* %7, align 8 + store %Array* %qubits, %Array** %8, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* 
}* %0, i32 0, i32 0 + %paulis = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Array*, double, %Array* }* + %6 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 2 + store %Array* %paulis, %Array** %6, align 8 + store double %theta, double* %7, align 8 + store %Array* %qubits, %Array** %8, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +declare void @__quantum__qis__h__body(%Qubit*) + +declare void @__quantum__qis__h__ctl(%Array*, %Qubit*) + +define internal %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { +entry: + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 -2, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %3 = bitcast i8* %2 to %Qubit** + store %Qubit* %qubit, %Qubit** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %4 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + ret %Result* %4 +} + +declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) + +define internal %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) 
+ %0 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret %Result* %0 +} + +declare %Result* @__quantum__rt__result_get_one() + +define internal void @Microsoft__Quantum__Intrinsic__ResetAll__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %2) + %5 = bitcast i8* %4 to %Qubit** + %qubit = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %qubit) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +declare void @__quantum__qis__s__body(%Qubit*) + +declare void @__quantum__qis__s__adj(%Qubit*) + +declare void @__quantum__qis__s__ctl(%Array*, %Qubit*) + +declare void @__quantum__qis__s__ctladj(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__y__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__y__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %qubit) 
+ call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQRSTerm____body({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %coeff = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %8 = bitcast i8* %7 to i64* + %p = load i64, i64* %8, align 4 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %10 = bitcast i8* %9 to i64* + %q = load i64, i64* %10, align 4 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 2) + %12 = bitcast i8* %11 to i64* + %r = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %14 = bitcast i8* %13 to i64* + %s = load i64, i64* %14, align 4 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%coeff, i64 0) + %16 = bitcast i8* %15 to double* + %17 = load double, double* %16, align 8 + %18 = fmul double 1.250000e-01, %17 + %angle = fmul double %18, %stepSize + %19 = icmp eq i64 %p, %q + br i1 %19, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %entry + %20 = icmp eq i64 %p, %r + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %entry + %21 = phi i1 [ %19, %entry ], [ %20, %condFalse__1 ] + br i1 %21, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %condContinue__1 + %22 = icmp eq i64 %p, %s + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condContinue__1 + %23 = phi i1 [ %21, %condContinue__1 ], [ %22, %condFalse__2 ] + br i1 %23, label %condContinue__3, label %condFalse__3 + +condFalse__3: ; preds = %condContinue__2 + %24 = icmp eq i64 %q, %r + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condContinue__2 + %25 = phi i1 [ %23, %condContinue__2 ], [ %24, %condFalse__3 ] + br i1 %25, label %condContinue__4, label %condFalse__4 + +condFalse__4: ; preds = %condContinue__3 + %26 = icmp eq i64 %q, %s + br label %condContinue__4 + +condContinue__4: ; preds = %condFalse__4, %condContinue__3 + %27 = phi i1 [ %25, %condContinue__3 ], [ %26, %condFalse__4 ] + br i1 %27, label %condContinue__5, label %condFalse__5 + +condFalse__5: ; preds = %condContinue__4 + %28 = icmp eq i64 %r, %s + br label %condContinue__5 + +condContinue__5: ; preds = %condFalse__5, %condContinue__4 + %29 = phi i1 [ %27, %condContinue__4 ], [ %28, %condFalse__5 ] + br i1 %29, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__5 + %30 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @22, i32 0, i32 0)) + %31 = call %String* @__quantum__rt__int_to_string(i64 %p) + %32 = call %String* @__quantum__rt__string_concatenate(%String* %30, %String* %31) + call void @__quantum__rt__string_update_reference_count(%String* %30, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %31, i32 -1) + %33 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %34 = call %String* @__quantum__rt__string_concatenate(%String* %32, %String* %33) + call void @__quantum__rt__string_update_reference_count(%String* %32, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %33, i32 -1) + %35 = call %String* @__quantum__rt__int_to_string(i64 %q) + %36 = call %String* @__quantum__rt__string_concatenate(%String* %34, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %38 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %37) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %37, i32 -1) + %39 = call %String* @__quantum__rt__int_to_string(i64 %r) + %40 = call %String* @__quantum__rt__string_concatenate(%String* %38, %String* %39) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 
0)) + %42 = call %String* @__quantum__rt__string_concatenate(%String* %40, %String* %41) + call void @__quantum__rt__string_update_reference_count(%String* %40, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %41, i32 -1) + %43 = call %String* @__quantum__rt__int_to_string(i64 %s) + %44 = call %String* @__quantum__rt__string_concatenate(%String* %42, %String* %43) + call void @__quantum__rt__string_update_reference_count(%String* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %43, i32 -1) + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %46 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %46) + unreachable + +continue__1: ; preds = %condContinue__5 + %47 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 0) + %49 = bitcast i8* %48 to i2* + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 1) + %51 = bitcast i8* %50 to i2* + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 2) + %53 = bitcast i8* %52 to i2* + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 3) + %55 = bitcast i8* %54 to i2* + store i2 -1, i2* %49, align 1 + store i2 -1, i2* %51, align 1 + store i2 1, i2* %53, align 1 + store i2 -1, i2* %55, align 1 + %56 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 0) + %58 = bitcast i8* %57 to i2* + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 1) + %60 = bitcast i8* %59 to i2* + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 2) + %62 = bitcast i8* %61 to i2* + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 3) + %64 = bitcast i8* %63 to i2* + store i2 1, i2* %58, align 1 + store i2 1, i2* %60, align 1 + store i2 1, i2* %62, align 1 + store i2 -1, i2* %64, align 1 + %65 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 0) + %67 = bitcast i8* %66 to i2* + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 1) + %69 = bitcast i8* %68 to i2* + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 2) + %71 = bitcast i8* %70 to i2* + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 3) + %73 = bitcast i8* %72 to i2* + store i2 1, i2* %67, align 1 + store i2 -1, i2* %69, 
align 1 + store i2 -1, i2* %71, align 1 + store i2 -1, i2* %73, align 1 + %74 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 0) + %76 = bitcast i8* %75 to i2* + %77 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 1) + %78 = bitcast i8* %77 to i2* + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 2) + %80 = bitcast i8* %79 to i2* + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 3) + %82 = bitcast i8* %81 to i2* + store i2 -1, i2* %76, align 1 + store i2 1, i2* %78, align 1 + store i2 -1, i2* %80, align 1 + store i2 -1, i2* %82, align 1 + %83 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 0) + %85 = bitcast i8* %84 to i2* + %86 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 1) + %87 = bitcast i8* %86 to i2* + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 2) + %89 = bitcast i8* %88 to i2* + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 3) + %91 = bitcast i8* %90 to i2* + store i2 1, i2* %85, align 1 + store i2 -1, i2* %87, align 1 + store i2 1, i2* %89, align 1 + store i2 1, i2* %91, align 1 + %92 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 0) + %94 = bitcast i8* %93 to i2* + %95 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 1) + %96 = bitcast i8* %95 to i2* + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 2) + %98 = bitcast i8* %97 to i2* + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 3) + %100 = bitcast i8* %99 to i2* + store i2 -1, i2* %94, align 1 + store i2 1, i2* %96, align 1 + store i2 1, i2* %98, align 1 + store i2 1, i2* %100, align 1 + %101 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 0) + %103 = bitcast i8* %102 to i2* + %104 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 1) + %105 = bitcast i8* %104 to i2* + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 2) + %107 = bitcast i8* %106 to i2* + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 3) + %109 = bitcast i8* %108 to i2* + store i2 -1, i2* %103, align 1 + store i2 -1, i2* %105, align 1 + store i2 -1, i2* %107, align 1 + store i2 1, i2* %109, align 1 + %110 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %111 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 0) + %112 = bitcast i8* %111 to i2* + %113 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 1) + %114 = bitcast i8* %113 to i2* + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 2) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 3) + %118 = bitcast i8* %117 to i2* + store i2 1, i2* %112, align 1 + store i2 1, i2* %114, align 1 + store i2 -1, i2* %116, align 1 + store i2 1, i2* %118, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %120 = bitcast i8* %119 to %Array** + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %122 = bitcast i8* %121 to 
%Array** + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 2) + %124 = bitcast i8* %123 to %Array** + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 3) + %126 = bitcast i8* %125 to %Array** + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 4) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 5) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 6) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 7) + %134 = bitcast i8* %133 to %Array** + store %Array* %47, %Array** %120, align 8 + store %Array* %56, %Array** %122, align 8 + store %Array* %65, %Array** %124, align 8 + store %Array* %74, %Array** %126, align 8 + store %Array* %83, %Array** %128, align 8 + store %Array* %92, %Array** %130, align 8 + store %Array* %101, %Array** %132, align 8 + store %Array* %110, %Array** %134, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %135 = phi i64 [ 0, %continue__1 ], [ %140, %exiting__1 ] + %136 = icmp sle i64 %135, 7 + br i1 %136, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %135) + %138 = bitcast i8* %137 to %Array** + %139 = load %Array*, %Array** %138, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %139, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %140 = add i64 %135, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %141 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 0) + %143 = bitcast i8* %142 to i64* + %144 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 1) + %145 = bitcast i8* %144 to i64* + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 2) + %147 = bitcast i8* %146 to i64* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 3) + %149 = bitcast i8* %148 to i64* + store i64 %p, i64* %143, align 4 + store i64 %q, i64* %145, align 4 + store i64 %r, i64* %147, align 4 + store i64 %s, i64* %149, align 4 + %150 = call { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %141) + %151 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 0 + %sortedIndices = load %Array*, %Array** %151, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %sortedIndices, i32 1) + %152 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 1 + %signs = load %Array*, %Array** %152, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 1) + %153 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 2 + %globalSign = load double, double* %153, align 8 + %154 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %ops, %Array* %signs) + %155 = call i64 @__quantum__rt__array_get_size_1d(%Array* %154) + %156 = sub i64 %155, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %157 = phi i64 [ 0, %exit__1 ], [ 
%166, %exiting__2 ] + %158 = icmp sle i64 %157, %156 + br i1 %158, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %159 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %154, i64 %157) + %160 = bitcast i8* %159 to { %Array*, double }** + %161 = load { %Array*, double }*, { %Array*, double }** %160, align 8 + %162 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %161, i32 0, i32 0 + %op = load %Array*, %Array** %162, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %163 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %161, i32 0, i32 1 + %sign = load double, double* %163, align 8 + %164 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %pauliString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %164, %Array* %sortedIndices, %Array* %op) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + %165 = fmul double %globalSign, %sign + %theta = fmul double %165, %angle + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %pauliString, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %166 = add i64 %157, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %167 = phi i64 [ 0, %exit__2 ], [ %172, %exiting__3 ] + %168 = icmp sle i64 %167, 7 + br i1 %168, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %169 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %167) + %170 = bitcast i8* %169 to %Array** + %171 = load %Array*, %Array** %170, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %171, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %172 = add i64 %167, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %sortedIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %173 = phi i64 [ 0, %exit__3 ], [ %178, %exiting__4 ] + 
%174 = icmp sle i64 %173, 7 + br i1 %174, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %175 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %173) + %176 = bitcast i8* %175 to %Array** + %177 = load %Array*, %Array** %176, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %177, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %178 = add i64 %173, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %141, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %sortedIndices, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %signs, i32 -1) + %179 = bitcast { %Array*, %Array*, double }* %150 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %179, i32 -1) + %180 = sub i64 %155, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %181 = phi i64 [ 0, %exit__4 ], [ %189, %exiting__5 ] + %182 = icmp sle i64 %181, %180 + br i1 %182, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %183 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %154, i64 %181) + %184 = bitcast i8* %183 to { %Array*, double }** + %185 = load { %Array*, double }*, { %Array*, double }** %184, align 8 + %186 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %185, i32 0, i32 0 + %187 = load %Array*, %Array** %186, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %187, i32 -1) + %188 = bitcast { %Array*, double }* %185 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %188, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %189 = add i64 %181, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %154, i32 -1) + ret void +} + +define internal { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %indices) { +entry: + %sign = alloca double, align 8 + %signs = alloca %Array*, align 8 + %sorted = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %1 = bitcast i8* %0 to i64* + %p = load i64, i64* %1, align 4 + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 1) + %3 = bitcast i8* %2 to i64* + %q = load i64, i64* %3, align 4 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 2) + %5 = bitcast i8* %4 to i64* + %r = load i64, i64* %5, align 4 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 3) + %7 = bitcast i8* %6 to i64* + %s = load i64, i64* %7, align 4 + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 0) + %10 = bitcast i8* %9 to i64* + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 1) + %12 = bitcast i8* %11 to i64* + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 2) + %14 = bitcast i8* %13 to i64* + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 3) + %16 = bitcast i8* %15 to i64* + store i64 0, i64* %10, align 4 + store i64 0, i64* %12, align 4 + store i64 0, i64* %14, align 4 + store i64 
0, i64* %16, align 4 + store %Array* %8, %Array** %sorted, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %17 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 0) + %19 = bitcast i8* %18 to double* + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 1) + %21 = bitcast i8* %20 to double* + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 2) + %23 = bitcast i8* %22 to double* + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 3) + %25 = bitcast i8* %24 to double* + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 4) + %27 = bitcast i8* %26 to double* + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 5) + %29 = bitcast i8* %28 to double* + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 6) + %31 = bitcast i8* %30 to double* + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 7) + %33 = bitcast i8* %32 to double* + store double 0.000000e+00, double* %19, align 8 + store double 0.000000e+00, double* %21, align 8 + store double 0.000000e+00, double* %23, align 8 + store double 0.000000e+00, double* %25, align 8 + store double 0.000000e+00, double* %27, align 8 + store double 0.000000e+00, double* %29, align 8 + store double 0.000000e+00, double* %31, align 8 + store double 0.000000e+00, double* %33, align 8 + store %Array* %17, %Array** %signs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + store double 1.000000e+00, double* %sign, align 8 + %34 = icmp sgt i64 %p, %q + br i1 %34, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + store double -1.000000e+00, double* %sign, align 8 + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + %35 = icmp sgt i64 %r, %s + br i1 %35, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + %36 = load double, double* %sign, align 8 + %37 = fmul double %36, -1.000000e+00 + store double %37, double* %sign, align 8 + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 + %38 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 0) + %40 = bitcast i8* %39 to i64* + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 1) + %42 = bitcast i8* %41 to i64* + store i64 %p, i64* %40, align 4 + store i64 %q, i64* %42, align 4 + %43 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %38) + %44 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 0) + %46 = bitcast i8* %45 to i64* + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 1) + %48 = bitcast i8* %47 to i64* + store i64 %r, i64* %46, align 4 + store i64 %s, i64* %48, align 4 + %49 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %44) + %50 = icmp sgt i64 %43, %49 + call void @__quantum__rt__array_update_reference_count(%Array* %38, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 -1) + br i1 %50, label %then0__3, label %else__1 + +then0__3: ; preds = %continue__2 + %51 = load double, double* %sign, align 8 + %52 = fmul double %51, -1.000000e+00 + store double %52, double* %sign, align 8 + %53 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %54 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 0) + %55 = bitcast i8* %54 to i64* + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 1) + %57 = bitcast i8* %56 to i64* + store i64 %r, i64* %55, align 4 + store i64 %s, i64* %57, align 4 + %58 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %53) + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1) + %59 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %59, i64 0) + %61 = bitcast i8* %60 to i64* + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %59, i64 1) + %63 = bitcast i8* %62 to i64* + store i64 %r, i64* %61, align 4 + store i64 %s, i64* %63, align 4 + %64 = call i64 @Microsoft__Quantum__Math__Max__body(%Array* %59) + call void @__quantum__rt__array_update_reference_count(%Array* %59, i32 -1) + %65 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 0) + %67 = bitcast i8* %66 to i64* + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 1) + %69 = bitcast i8* %68 to i64* + store i64 %p, i64* %67, align 4 + store i64 %q, i64* %69, align 4 + %70 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %65) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + %71 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %71, i64 0) + %73 = bitcast i8* %72 to i64* + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %71, i64 1) + %75 = bitcast i8* %74 to i64* + store i64 %p, i64* %73, align 4 + store i64 %q, i64* %75, align 4 + %76 = call i64 @Microsoft__Quantum__Math__Max__body(%Array* %71) + call void @__quantum__rt__array_update_reference_count(%Array* %71, i32 -1) + %77 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 0) + %79 = bitcast i8* %78 to i64* + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 1) + %81 = bitcast i8* %80 to i64* + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 2) + %83 = bitcast i8* %82 to i64* + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %77, i64 3) + %85 = bitcast i8* %84 to i64* + store i64 %58, i64* %79, align 4 + store i64 %64, i64* %81, align 4 + store i64 %70, i64* %83, align 4 + store i64 %76, i64* %85, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %77, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 -1) + store %Array* %77, %Array** %sorted, align 8 + br label %continue__3 + +else__1: ; preds = %continue__2 + %86 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %87 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %86, i64 0) + %88 = bitcast i8* %87 to i64* + %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %86, i64 1) + %90 = bitcast i8* %89 to i64* + store i64 %p, i64* %88, align 4 + store i64 %q, i64* %90, align 4 + %91 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %86) + call void @__quantum__rt__array_update_reference_count(%Array* %86, i32 -1) + %92 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 0) + %94 = bitcast 
i8* %93 to i64* + %95 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 1) + %96 = bitcast i8* %95 to i64* + store i64 %p, i64* %94, align 4 + store i64 %q, i64* %96, align 4 + %97 = call i64 @Microsoft__Quantum__Math__Max__body(%Array* %92) + call void @__quantum__rt__array_update_reference_count(%Array* %92, i32 -1) + %98 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %98, i64 0) + %100 = bitcast i8* %99 to i64* + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %98, i64 1) + %102 = bitcast i8* %101 to i64* + store i64 %r, i64* %100, align 4 + store i64 %s, i64* %102, align 4 + %103 = call i64 @Microsoft__Quantum__Math__Min__body(%Array* %98) + call void @__quantum__rt__array_update_reference_count(%Array* %98, i32 -1) + %104 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %105 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %104, i64 0) + %106 = bitcast i8* %105 to i64* + %107 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %104, i64 1) + %108 = bitcast i8* %107 to i64* + store i64 %r, i64* %106, align 4 + store i64 %s, i64* %108, align 4 + %109 = call i64 @Microsoft__Quantum__Math__Max__body(%Array* %104) + call void @__quantum__rt__array_update_reference_count(%Array* %104, i32 -1) + %110 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %111 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 0) + %112 = bitcast i8* %111 to i64* + %113 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 1) + %114 = bitcast i8* %113 to i64* + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 2) + %116 = bitcast i8* %115 to i64* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 3) + %118 = bitcast i8* %117 to i64* + store i64 %91, i64* %112, align 4 + store i64 %97, i64* %114, align 4 + store i64 %103, i64* %116, align 4 + store i64 %109, i64* %118, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %110, i32 1) + %119 = load %Array*, %Array** %sorted, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %119, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %119, i32 -1) + store %Array* %110, %Array** %sorted, align 8 + br label %continue__3 + +continue__3: ; preds = %else__1, %then0__3 + %120 = load %Array*, %Array** %sorted, align 8 + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %120, i64 0) + %122 = bitcast i8* %121 to i64* + %p1 = load i64, i64* %122, align 4 + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %120, i64 1) + %124 = bitcast i8* %123 to i64* + %q1 = load i64, i64* %124, align 4 + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %120, i64 2) + %126 = bitcast i8* %125 to i64* + %r1 = load i64, i64* %126, align 4 + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %120, i64 3) + %128 = bitcast i8* %127 to i64* + %s1 = load i64, i64* %128, align 4 + %129 = icmp slt i64 %q1, %r1 + br i1 %129, label %then0__4, label %test1__1 + +then0__4: ; preds = %continue__3 + %130 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %130, i64 0) + %132 = bitcast i8* %131 to i64* + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %130, i64 1) + %134 = bitcast i8* %133 to i64* + %135 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%130, i64 2) + %136 = bitcast i8* %135 to i64* + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %130, i64 3) + %138 = bitcast i8* %137 to i64* + store i64 %p1, i64* %132, align 4 + store i64 %q1, i64* %134, align 4 + store i64 %r1, i64* %136, align 4 + store i64 %s1, i64* %138, align 4 + %139 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %140 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 0) + %141 = bitcast i8* %140 to double* + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 1) + %143 = bitcast i8* %142 to double* + %144 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 2) + %145 = bitcast i8* %144 to double* + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 3) + %147 = bitcast i8* %146 to double* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 4) + %149 = bitcast i8* %148 to double* + %150 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 5) + %151 = bitcast i8* %150 to double* + %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 6) + %153 = bitcast i8* %152 to double* + %154 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %139, i64 7) + %155 = bitcast i8* %154 to double* + store double 1.000000e+00, double* %141, align 8 + store double -1.000000e+00, double* %143, align 8 + store double -1.000000e+00, double* %145, align 8 + store double -1.000000e+00, double* %147, align 8 + store double 1.000000e+00, double* %149, align 8 + store double 1.000000e+00, double* %151, align 8 + store double 1.000000e+00, double* %153, align 8 + store double -1.000000e+00, double* %155, align 8 + %156 = load double, double* %sign, align 8 + %157 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, double }* getelementptr ({ %Array*, %Array*, double }, { %Array*, %Array*, double }* null, i32 1) to i64)) + %158 = bitcast %Tuple* %157 to { %Array*, %Array*, double }* + %159 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %158, i32 0, i32 0 + %160 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %158, i32 0, i32 1 + %161 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %158, i32 0, i32 2 + store %Array* %130, %Array** %159, align 8 + store %Array* %139, %Array** %160, align 8 + store double %156, double* %161, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + ret { %Array*, %Array*, double }* %158 + +test1__1: ; preds = %continue__3 + %162 = icmp sgt i64 %q1, %r1 + br i1 %162, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %test1__1 + %163 = icmp slt i64 %q1, %s1 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %test1__1 + %164 = phi i1 [ %163, %condTrue__1 ], [ %162, %test1__1 ] + br i1 %164, label %then1__1, label %test2__1 + +then1__1: ; preds = %condContinue__1 + %165 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %166 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 0) + %167 = bitcast i8* %166 to i64* + %168 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 1) + %169 = bitcast i8* %168 to i64* + %170 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 2) + %171 = bitcast i8* %170 to i64* + %172 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 3) + %173 = bitcast i8* %172 to i64* + store i64 %p1, i64* %167, align 4 + store i64 %r1, i64* %169, align 4 + store i64 %q1, i64* %171, align 4 + store i64 %s1, i64* %173, align 4 + %174 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %175 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 0) + %176 = bitcast i8* %175 to double* + %177 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 1) + %178 = bitcast i8* %177 to double* + %179 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 2) + %180 = bitcast i8* %179 to double* + %181 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 3) + %182 = bitcast i8* %181 to double* + %183 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 4) + %184 = bitcast i8* %183 to double* + %185 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 5) + %186 = bitcast i8* %185 to double* + %187 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 6) + %188 = bitcast i8* %187 to double* + %189 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %174, i64 7) + %190 = bitcast i8* %189 to double* + store double -1.000000e+00, double* %176, align 8 + store double -1.000000e+00, double* %178, align 8 + store double -1.000000e+00, double* %180, align 8 + store double 1.000000e+00, double* %182, align 8 + store double -1.000000e+00, double* %184, align 8 + store double 1.000000e+00, double* %186, align 8 + store double 1.000000e+00, double* %188, align 8 + store double 1.000000e+00, double* %190, align 8 + %191 = load double, double* %sign, align 8 + %192 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, double }* getelementptr ({ %Array*, %Array*, double }, { %Array*, %Array*, double }* null, i32 1) to i64)) + %193 = bitcast %Tuple* %192 to { %Array*, %Array*, double }* + %194 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %193, i32 0, i32 0 + %195 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %193, i32 0, i32 1 + %196 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %193, i32 0, i32 2 + store %Array* %165, %Array** %194, align 8 + store %Array* %174, %Array** %195, align 8 + store double %191, double* %196, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + ret { %Array*, %Array*, double }* %193 + +test2__1: ; preds = %condContinue__1 + %197 = icmp sgt i64 %q1, %r1 + br i1 %197, label %condTrue__2, label %condContinue__2 + +condTrue__2: ; preds = %test2__1 + %198 = icmp sgt i64 %q1, %s1 + br label %condContinue__2 + +condContinue__2: ; preds = %condTrue__2, %test2__1 + %199 = phi i1 [ %198, %condTrue__2 ], [ %197, %test2__1 ] + br i1 %199, label %then2__1, label %else__2 + +then2__1: ; preds = %condContinue__2 + %200 = call %Array* 
@__quantum__rt__array_create_1d(i32 8, i64 4) + %201 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 0) + %202 = bitcast i8* %201 to i64* + %203 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 1) + %204 = bitcast i8* %203 to i64* + %205 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 2) + %206 = bitcast i8* %205 to i64* + %207 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %200, i64 3) + %208 = bitcast i8* %207 to i64* + store i64 %p1, i64* %202, align 4 + store i64 %r1, i64* %204, align 4 + store i64 %s1, i64* %206, align 4 + store i64 %q1, i64* %208, align 4 + %209 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %210 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 0) + %211 = bitcast i8* %210 to double* + %212 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 1) + %213 = bitcast i8* %212 to double* + %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 2) + %215 = bitcast i8* %214 to double* + %216 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 3) + %217 = bitcast i8* %216 to double* + %218 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 4) + %219 = bitcast i8* %218 to double* + %220 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 5) + %221 = bitcast i8* %220 to double* + %222 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 6) + %223 = bitcast i8* %222 to double* + %224 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %209, i64 7) + %225 = bitcast i8* %224 to double* + store double 1.000000e+00, double* %211, align 8 + store double 1.000000e+00, double* %213, align 8 + store double -1.000000e+00, double* %215, align 8 + store double 1.000000e+00, double* %217, align 8 + store double -1.000000e+00, double* %219, align 8 + store double 1.000000e+00, double* %221, align 8 + store double -1.000000e+00, double* %223, align 8 + store double -1.000000e+00, double* %225, align 8 + %226 = load double, double* %sign, align 8 + %227 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, double }* getelementptr ({ %Array*, %Array*, double }, { %Array*, %Array*, double }* null, i32 1) to i64)) + %228 = bitcast %Tuple* %227 to { %Array*, %Array*, double }* + %229 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %228, i32 0, i32 0 + %230 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %228, i32 0, i32 1 + %231 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %228, i32 0, i32 2 + store %Array* %200, %Array** %229, align 8 + store %Array* %209, %Array** %230, align 8 + store double %226, double* %231, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + ret { %Array*, %Array*, double }* %228 + +else__2: ; preds = %condContinue__2 + %232 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([47 x i8], [47 x i8]* @29, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__fail(%String* %232) + unreachable + +continue__4: ; No predecessors! + unreachable +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %nFermions, %Array* %idxFermions, %Array* %pauliReplacements) { +entry: + %pauliString = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliReplacements, i32 1) + %0 = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliZString__body(i64 %nFermions, %Array* %idxFermions) + store %Array* %0, %Array** %pauliString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 1) + %1 = call %Range @Microsoft__Quantum__Arrays___d58849b717694e4ca69317572366b289_IndexRange__body(%Array* %idxFermions) + %2 = extractvalue %Range %1, 0 + %3 = extractvalue %Range %1, 1 + %4 = extractvalue %Range %1, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %5 = icmp sgt i64 %3, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idx = phi i64 [ %2, %preheader__1 ], [ %17, %exiting__1 ] + %6 = icmp sle i64 %idx, %4 + %7 = icmp sge i64 %idx, %4 + %8 = select i1 %5, i1 %6, i1 %7 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 %idx) + %10 = bitcast i8* %9 to i64* + %idxFermion = load i64, i64* %10, align 4 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %pauliReplacements, i64 %idx) + %12 = bitcast i8* %11 to i2* + %op = load i2, i2* %12, align 1 + %13 = load %Array*, %Array** %pauliString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 -1) + %14 = call %Array* @__quantum__rt__array_copy(%Array* %13, i1 false) + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %14, i64 %idxFermion) + %16 = bitcast i8* %15 to i2* + store i2 %op, i2* %16, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + store %Array* %14, %Array** %pauliString, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %idx, %3 + br label %header__1 + +exit__1: ; preds = %header__1 + %18 = load %Array*, %Array** %pauliString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliReplacements, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 -1) + ret %Array* %18 +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQRSTerm____adj({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %__qsVar0__idxTermType__ = load 
%Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %8 = bitcast i8* %7 to i64* + %__qsVar3__p__ = load i64, i64* %8, align 4 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %10 = bitcast i8* %9 to i64* + %__qsVar4__q__ = load i64, i64* %10, align 4 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 2) + %12 = bitcast i8* %11 to i64* + %__qsVar5__r__ = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %14 = bitcast i8* %13 to i64* + %__qsVar6__s__ = load i64, i64* %14, align 4 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %16 = bitcast i8* %15 to double* + %17 = load double, double* %16, align 8 + %18 = fmul double 1.250000e-01, %17 + %__qsVar7__angle__ = fmul double %18, %stepSize + %19 = icmp eq i64 %__qsVar3__p__, %__qsVar4__q__ + br i1 %19, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %entry + %20 = icmp eq i64 %__qsVar3__p__, %__qsVar5__r__ + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %entry + %21 = phi i1 [ %19, %entry ], [ %20, %condFalse__1 ] + br i1 %21, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %condContinue__1 + %22 = icmp eq i64 %__qsVar3__p__, %__qsVar6__s__ + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condContinue__1 + %23 = phi i1 [ %21, %condContinue__1 ], [ %22, %condFalse__2 ] + br i1 %23, label %condContinue__3, label %condFalse__3 + +condFalse__3: ; preds = %condContinue__2 + %24 = icmp eq i64 %__qsVar4__q__, %__qsVar5__r__ + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condContinue__2 + %25 = phi i1 [ %23, %condContinue__2 ], [ %24, %condFalse__3 ] + br i1 %25, label %condContinue__4, label %condFalse__4 + +condFalse__4: ; preds = %condContinue__3 + %26 = icmp eq i64 %__qsVar4__q__, %__qsVar6__s__ + br label %condContinue__4 + +condContinue__4: ; preds = %condFalse__4, %condContinue__3 + %27 = phi i1 [ %25, %condContinue__3 ], [ %26, %condFalse__4 ] + br i1 %27, label %condContinue__5, label %condFalse__5 + +condFalse__5: ; preds = 
%condContinue__4 + %28 = icmp eq i64 %__qsVar5__r__, %__qsVar6__s__ + br label %condContinue__5 + +condContinue__5: ; preds = %condFalse__5, %condContinue__4 + %29 = phi i1 [ %27, %condContinue__4 ], [ %28, %condFalse__5 ] + br i1 %29, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__5 + %30 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @22, i32 0, i32 0)) + %31 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar3__p__) + %32 = call %String* @__quantum__rt__string_concatenate(%String* %30, %String* %31) + call void @__quantum__rt__string_update_reference_count(%String* %30, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %31, i32 -1) + %33 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %34 = call %String* @__quantum__rt__string_concatenate(%String* %32, %String* %33) + call void @__quantum__rt__string_update_reference_count(%String* %32, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %33, i32 -1) + %35 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar4__q__) + %36 = call %String* @__quantum__rt__string_concatenate(%String* %34, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %38 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %37) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %37, i32 -1) + %39 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar5__r__) + %40 = call %String* @__quantum__rt__string_concatenate(%String* %38, %String* %39) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %42 = call %String* @__quantum__rt__string_concatenate(%String* %40, %String* %41) + call void @__quantum__rt__string_update_reference_count(%String* %40, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %41, i32 -1) + %43 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar6__s__) + %44 = call %String* @__quantum__rt__string_concatenate(%String* %42, %String* %43) + call void @__quantum__rt__string_update_reference_count(%String* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %43, i32 -1) + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %46 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__fail(%String* %46) + unreachable + +continue__1: ; preds = %condContinue__5 + %47 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 0) + %49 = bitcast i8* %48 to i2* + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 1) + %51 = bitcast i8* %50 to i2* + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 2) + %53 = bitcast i8* %52 to i2* + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 3) + %55 = bitcast i8* %54 to i2* + store i2 -1, i2* %49, align 1 + store i2 -1, i2* %51, align 1 + store i2 1, i2* %53, align 1 + store i2 -1, i2* %55, align 1 + %56 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 0) + %58 = bitcast i8* %57 to i2* + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 1) + %60 = bitcast i8* %59 to i2* + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 2) + %62 = bitcast i8* %61 to i2* + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 3) + %64 = bitcast i8* %63 to i2* + store i2 1, i2* %58, align 1 + store i2 1, i2* %60, align 1 + store i2 1, i2* %62, align 1 + store i2 -1, i2* %64, align 1 + %65 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 0) + %67 = bitcast i8* %66 to i2* + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 1) + %69 = bitcast i8* %68 to i2* + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 2) + %71 = bitcast i8* %70 to i2* + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 3) + %73 = bitcast i8* %72 to i2* + store i2 1, i2* %67, align 1 + store i2 -1, i2* %69, align 1 + store i2 -1, i2* %71, align 1 + store i2 -1, i2* %73, align 1 + %74 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 0) + %76 = bitcast i8* %75 to i2* + %77 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 1) + %78 = bitcast i8* %77 to i2* + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 2) + %80 = bitcast i8* %79 to i2* + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 3) + %82 = bitcast i8* %81 to i2* + store i2 -1, i2* %76, align 1 + store i2 1, i2* %78, align 1 + store i2 -1, i2* %80, align 1 + store i2 -1, i2* %82, align 1 + %83 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 0) + %85 = bitcast i8* %84 to i2* + %86 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 1) + %87 = bitcast i8* %86 to i2* + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 2) + %89 = bitcast i8* %88 to i2* + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 3) + %91 = bitcast i8* %90 to i2* + store i2 
1, i2* %85, align 1 + store i2 -1, i2* %87, align 1 + store i2 1, i2* %89, align 1 + store i2 1, i2* %91, align 1 + %92 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 0) + %94 = bitcast i8* %93 to i2* + %95 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 1) + %96 = bitcast i8* %95 to i2* + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 2) + %98 = bitcast i8* %97 to i2* + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %92, i64 3) + %100 = bitcast i8* %99 to i2* + store i2 -1, i2* %94, align 1 + store i2 1, i2* %96, align 1 + store i2 1, i2* %98, align 1 + store i2 1, i2* %100, align 1 + %101 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %102 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 0) + %103 = bitcast i8* %102 to i2* + %104 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 1) + %105 = bitcast i8* %104 to i2* + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 2) + %107 = bitcast i8* %106 to i2* + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 3) + %109 = bitcast i8* %108 to i2* + store i2 -1, i2* %103, align 1 + store i2 -1, i2* %105, align 1 + store i2 -1, i2* %107, align 1 + store i2 1, i2* %109, align 1 + %110 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %111 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 0) + %112 = bitcast i8* %111 to i2* + %113 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 1) + %114 = bitcast i8* %113 to i2* + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 2) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %110, i64 3) + %118 = bitcast i8* %117 to i2* + store i2 1, i2* %112, align 1 + store i2 1, i2* %114, align 1 + store i2 -1, i2* %116, align 1 + store i2 1, i2* %118, align 1 + %__qsVar10__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 0) + %120 = bitcast i8* %119 to %Array** + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 1) + %122 = bitcast i8* %121 to %Array** + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 2) + %124 = bitcast i8* %123 to %Array** + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 3) + %126 = bitcast i8* %125 to %Array** + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 4) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 5) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 6) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 7) + %134 = bitcast i8* %133 to %Array** + store %Array* %47, %Array** %120, align 8 + store %Array* %56, %Array** %122, align 8 + store %Array* %65, %Array** %124, align 8 + store %Array* %74, %Array** %126, align 8 + store %Array* %83, %Array** %128, align 8 + store %Array* %92, %Array** %130, align 8 + store %Array* %101, %Array** %132, align 8 + store %Array* %110, %Array** %134, align 8 + br label %header__1 + 
+header__1: ; preds = %exiting__1, %continue__1 + %135 = phi i64 [ 0, %continue__1 ], [ %140, %exiting__1 ] + %136 = icmp sle i64 %135, 7 + br i1 %136, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %135) + %138 = bitcast i8* %137 to %Array** + %139 = load %Array*, %Array** %138, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %139, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %140 = add i64 %135, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__ops__, i32 1) + %141 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 0) + %143 = bitcast i8* %142 to i64* + %144 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 1) + %145 = bitcast i8* %144 to i64* + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 2) + %147 = bitcast i8* %146 to i64* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %141, i64 3) + %149 = bitcast i8* %148 to i64* + store i64 %__qsVar3__p__, i64* %143, align 4 + store i64 %__qsVar4__q__, i64* %145, align 4 + store i64 %__qsVar5__r__, i64* %147, align 4 + store i64 %__qsVar6__s__, i64* %149, align 4 + %150 = call { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %141) + %151 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 0 + %__qsVar11__sortedIndices__ = load %Array*, %Array** %151, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar11__sortedIndices__, i32 1) + %152 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 1 + %__qsVar12__signs__ = load %Array*, %Array** %152, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar12__signs__, i32 1) + %153 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %150, i32 0, i32 2 + %__qsVar13__globalSign__ = load double, double* %153, align 8 + %154 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %__qsVar10__ops__, %Array* %__qsVar12__signs__) + %155 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %__qsVar10__ops__, %Array* %__qsVar12__signs__) + %156 = call i64 @__quantum__rt__array_get_size_1d(%Array* %155) + %157 = sub i64 %156, 1 + %158 = insertvalue %Range zeroinitializer, i64 %157, 0 + %159 = insertvalue %Range %158, i64 -1, 1 + %160 = insertvalue %Range %159, i64 0, 2 + %161 = call %Array* @__quantum__rt__array_slice_1d(%Array* %154, %Range %160, i1 true) + %162 = call i64 @__quantum__rt__array_get_size_1d(%Array* %161) + %163 = sub i64 %162, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %164 = phi i64 [ 0, %exit__1 ], [ %173, %exiting__2 ] + %165 = icmp sle i64 %164, %163 + br i1 %165, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %166 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %161, i64 %164) + %167 = bitcast i8* %166 to { %Array*, double }** + %168 = load { %Array*, double }*, { %Array*, double }** %167, align 8 + %169 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %168, i32 0, i32 0 + %__qsVar14__op__ = load 
%Array*, %Array** %169, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar14__op__, i32 1) + %170 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %168, i32 0, i32 1 + %__qsVar15__sign__ = load double, double* %170, align 8 + %171 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %__qsVar16__pauliString__ = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %171, %Array* %__qsVar11__sortedIndices__, %Array* %__qsVar14__op__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 1) + %172 = fmul double %__qsVar13__globalSign__, %__qsVar15__sign__ + %theta = fmul double %172, %__qsVar7__angle__ + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %__qsVar16__pauliString__, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar14__op__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar16__pauliString__, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %173 = add i64 %164, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %174 = phi i64 [ 0, %exit__2 ], [ %179, %exiting__3 ] + %175 = icmp sle i64 %174, 7 + br i1 %175, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %176 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %174) + %177 = bitcast i8* %176 to %Array** + %178 = load %Array*, %Array** %177, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %178, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %179 = add i64 %174, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__ops__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar11__sortedIndices__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar12__signs__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %180 = phi i64 [ 0, %exit__3 ], [ %185, %exiting__4 ] + %181 = icmp sle i64 %180, 7 + br i1 %181, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %182 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %180) + %183 = bitcast i8* %182 to %Array** + %184 = load %Array*, %Array** %183, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %184, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %185 = add i64 %180, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %141, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar11__sortedIndices__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar12__signs__, i32 -1) + %186 = bitcast { %Array*, %Array*, double }* %150 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %186, i32 -1) + %187 = call i64 @__quantum__rt__array_get_size_1d(%Array* %154) + %188 = sub i64 %187, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %189 = phi i64 [ 0, %exit__4 ], [ %197, %exiting__5 ] + %190 = icmp sle i64 %189, %188 + br i1 %190, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %191 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %154, i64 %189) + %192 = bitcast i8* %191 to { %Array*, double }** + %193 = load { %Array*, double }*, { %Array*, double }** %192, align 8 + %194 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %193, i32 0, i32 0 + %195 = load %Array*, %Array** %194, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %195, i32 -1) + %196 = bitcast { %Array*, double }* %193 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %196, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %197 = add i64 %189, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %154, i32 -1) + %198 = sub i64 %156, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %199 = phi i64 [ 0, %exit__5 ], [ %207, %exiting__6 ] + %200 = icmp sle i64 %199, %198 + br i1 %200, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %201 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %155, i64 %199) + %202 = bitcast i8* %201 to { %Array*, double }** + %203 = load { %Array*, double }*, { %Array*, double }** %202, align 8 + %204 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %203, i32 0, i32 0 + %205 = load %Array*, %Array** %204, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %205, i32 -1) + %206 = bitcast { %Array*, double }* %203 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %206, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %207 = add i64 %199, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %155, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %161, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQRSTerm____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* 
}*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %coeff = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %12 = bitcast i8* %11 to i64* + %p = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %14 = bitcast i8* %13 to i64* + %q = load i64, i64* %14, align 4 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 2) + %16 = bitcast i8* %15 to i64* + %r = load i64, i64* %16, align 4 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 3) + %18 = bitcast i8* %17 to i64* + %s = load i64, i64* %18, align 4 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %20 = bitcast i8* %19 to double* + %21 = load double, double* %20, align 8 + %22 = fmul double 1.250000e-01, %21 + %angle = fmul double %22, %stepSize + %23 = icmp eq i64 %p, %q + br i1 %23, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %entry + %24 = icmp eq i64 %p, %r + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %entry + %25 = phi i1 [ %23, %entry ], [ %24, %condFalse__1 ] + br i1 %25, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %condContinue__1 + %26 = icmp eq i64 %p, %s + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condContinue__1 + %27 = phi i1 [ %25, %condContinue__1 ], [ %26, %condFalse__2 ] + br i1 %27, label %condContinue__3, label %condFalse__3 + +condFalse__3: ; preds = %condContinue__2 + %28 = 
icmp eq i64 %q, %r + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condContinue__2 + %29 = phi i1 [ %27, %condContinue__2 ], [ %28, %condFalse__3 ] + br i1 %29, label %condContinue__4, label %condFalse__4 + +condFalse__4: ; preds = %condContinue__3 + %30 = icmp eq i64 %q, %s + br label %condContinue__4 + +condContinue__4: ; preds = %condFalse__4, %condContinue__3 + %31 = phi i1 [ %29, %condContinue__3 ], [ %30, %condFalse__4 ] + br i1 %31, label %condContinue__5, label %condFalse__5 + +condFalse__5: ; preds = %condContinue__4 + %32 = icmp eq i64 %r, %s + br label %condContinue__5 + +condContinue__5: ; preds = %condFalse__5, %condContinue__4 + %33 = phi i1 [ %31, %condContinue__4 ], [ %32, %condFalse__5 ] + br i1 %33, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__5 + %34 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @22, i32 0, i32 0)) + %35 = call %String* @__quantum__rt__int_to_string(i64 %p) + %36 = call %String* @__quantum__rt__string_concatenate(%String* %34, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %38 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %37) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %37, i32 -1) + %39 = call %String* @__quantum__rt__int_to_string(i64 %q) + %40 = call %String* @__quantum__rt__string_concatenate(%String* %38, %String* %39) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %42 = call %String* @__quantum__rt__string_concatenate(%String* %40, %String* %41) + call void @__quantum__rt__string_update_reference_count(%String* %40, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %41, i32 -1) + %43 = call %String* @__quantum__rt__int_to_string(i64 %r) + %44 = call %String* @__quantum__rt__string_concatenate(%String* %42, %String* %43) + call void @__quantum__rt__string_update_reference_count(%String* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %43, i32 -1) + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %46 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + %47 = call %String* @__quantum__rt__int_to_string(i64 %s) + %48 = call %String* @__quantum__rt__string_concatenate(%String* %46, %String* %47) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + %49 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %50 = call %String* @__quantum__rt__string_concatenate(%String* %48, %String* %49) + call void @__quantum__rt__string_update_reference_count(%String* %48, i32 -1) + 
call void @__quantum__rt__string_update_reference_count(%String* %49, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %50) + unreachable + +continue__1: ; preds = %condContinue__5 + %51 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 0) + %53 = bitcast i8* %52 to i2* + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 1) + %55 = bitcast i8* %54 to i2* + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 2) + %57 = bitcast i8* %56 to i2* + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 3) + %59 = bitcast i8* %58 to i2* + store i2 -1, i2* %53, align 1 + store i2 -1, i2* %55, align 1 + store i2 1, i2* %57, align 1 + store i2 -1, i2* %59, align 1 + %60 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 0) + %62 = bitcast i8* %61 to i2* + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 1) + %64 = bitcast i8* %63 to i2* + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 2) + %66 = bitcast i8* %65 to i2* + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 3) + %68 = bitcast i8* %67 to i2* + store i2 1, i2* %62, align 1 + store i2 1, i2* %64, align 1 + store i2 1, i2* %66, align 1 + store i2 -1, i2* %68, align 1 + %69 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 0) + %71 = bitcast i8* %70 to i2* + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 1) + %73 = bitcast i8* %72 to i2* + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 2) + %75 = bitcast i8* %74 to i2* + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 3) + %77 = bitcast i8* %76 to i2* + store i2 1, i2* %71, align 1 + store i2 -1, i2* %73, align 1 + store i2 -1, i2* %75, align 1 + store i2 -1, i2* %77, align 1 + %78 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 0) + %80 = bitcast i8* %79 to i2* + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 1) + %82 = bitcast i8* %81 to i2* + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 2) + %84 = bitcast i8* %83 to i2* + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 3) + %86 = bitcast i8* %85 to i2* + store i2 -1, i2* %80, align 1 + store i2 1, i2* %82, align 1 + store i2 -1, i2* %84, align 1 + store i2 -1, i2* %86, align 1 + %87 = call %Array* 
@__quantum__rt__array_create_1d(i32 1, i64 4) + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 0) + %89 = bitcast i8* %88 to i2* + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 1) + %91 = bitcast i8* %90 to i2* + %92 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 2) + %93 = bitcast i8* %92 to i2* + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 3) + %95 = bitcast i8* %94 to i2* + store i2 1, i2* %89, align 1 + store i2 -1, i2* %91, align 1 + store i2 1, i2* %93, align 1 + store i2 1, i2* %95, align 1 + %96 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 0) + %98 = bitcast i8* %97 to i2* + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 1) + %100 = bitcast i8* %99 to i2* + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 2) + %102 = bitcast i8* %101 to i2* + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 3) + %104 = bitcast i8* %103 to i2* + store i2 -1, i2* %98, align 1 + store i2 1, i2* %100, align 1 + store i2 1, i2* %102, align 1 + store i2 1, i2* %104, align 1 + %105 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 0) + %107 = bitcast i8* %106 to i2* + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 1) + %109 = bitcast i8* %108 to i2* + %110 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 2) + %111 = bitcast i8* %110 to i2* + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 3) + %113 = bitcast i8* %112 to i2* + store i2 -1, i2* %107, align 1 + store i2 -1, i2* %109, align 1 + store i2 -1, i2* %111, align 1 + store i2 1, i2* %113, align 1 + %114 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 0) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 1) + %118 = bitcast i8* %117 to i2* + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 2) + %120 = bitcast i8* %119 to i2* + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 3) + %122 = bitcast i8* %121 to i2* + store i2 1, i2* %116, align 1 + store i2 1, i2* %118, align 1 + store i2 -1, i2* %120, align 1 + store i2 1, i2* %122, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %124 = bitcast i8* %123 to %Array** + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %126 = bitcast i8* %125 to %Array** + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 2) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 3) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 4) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 5) + %134 = bitcast i8* %133 to %Array** + %135 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 6) + %136 = bitcast i8* %135 to %Array** + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 7) + %138 = bitcast i8* %137 to 
%Array** + store %Array* %51, %Array** %124, align 8 + store %Array* %60, %Array** %126, align 8 + store %Array* %69, %Array** %128, align 8 + store %Array* %78, %Array** %130, align 8 + store %Array* %87, %Array** %132, align 8 + store %Array* %96, %Array** %134, align 8 + store %Array* %105, %Array** %136, align 8 + store %Array* %114, %Array** %138, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %139 = phi i64 [ 0, %continue__1 ], [ %144, %exiting__1 ] + %140 = icmp sle i64 %139, 7 + br i1 %140, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %141 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %139) + %142 = bitcast i8* %141 to %Array** + %143 = load %Array*, %Array** %142, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %143, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %144 = add i64 %139, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %145 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 0) + %147 = bitcast i8* %146 to i64* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 1) + %149 = bitcast i8* %148 to i64* + %150 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 2) + %151 = bitcast i8* %150 to i64* + %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 3) + %153 = bitcast i8* %152 to i64* + store i64 %p, i64* %147, align 4 + store i64 %q, i64* %149, align 4 + store i64 %r, i64* %151, align 4 + store i64 %s, i64* %153, align 4 + %154 = call { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %145) + %155 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 0 + %sortedIndices = load %Array*, %Array** %155, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %sortedIndices, i32 1) + %156 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 1 + %signs = load %Array*, %Array** %156, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 1) + %157 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 2 + %globalSign = load double, double* %157, align 8 + %158 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %ops, %Array* %signs) + %159 = call i64 @__quantum__rt__array_get_size_1d(%Array* %158) + %160 = sub i64 %159, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %161 = phi i64 [ 0, %exit__1 ], [ %175, %exiting__2 ] + %162 = icmp sle i64 %161, %160 + br i1 %162, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %163 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %158, i64 %161) + %164 = bitcast i8* %163 to { %Array*, double }** + %165 = load { %Array*, double }*, { %Array*, double }** %164, align 8 + %166 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %165, i32 0, i32 0 + %op = load %Array*, %Array** %166, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %167 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %165, i32 0, i32 1 + %sign = load double, double* %167, align 8 + %168 = call i64 
@__quantum__rt__array_get_size_1d(%Array* %qubits) + %pauliString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %168, %Array* %sortedIndices, %Array* %op) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + %169 = fmul double %globalSign, %sign + %theta = fmul double %169, %angle + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %170 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %171 = bitcast %Tuple* %170 to { %Array*, double, %Array* }* + %172 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %171, i32 0, i32 0 + %173 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %171, i32 0, i32 1 + %174 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %171, i32 0, i32 2 + store %Array* %pauliString, %Array** %172, align 8 + store double %theta, double* %173, align 8 + store %Array* %qubits, %Array** %174, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %171) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %170, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %175 = add i64 %161, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %176 = phi i64 [ 0, %exit__2 ], [ %181, %exiting__3 ] + %177 = icmp sle i64 %176, 7 + br i1 %177, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %178 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %176) + %179 = bitcast i8* %178 to %Array** + %180 = load %Array*, %Array** %179, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %180, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %181 = add i64 %176, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %sortedIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %182 = phi i64 [ 0, %exit__3 ], [ %187, %exiting__4 ] + %183 = icmp sle i64 %182, 7 + br i1 %183, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %184 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %182) + %185 = bitcast i8* %184 to %Array** + %186 = load %Array*, %Array** %185, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %186, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %187 = add i64 %182, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %145, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %sortedIndices, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %signs, i32 -1) + %188 = bitcast { %Array*, %Array*, double }* %154 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %188, i32 -1) + %189 = sub i64 %159, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %190 = phi i64 [ 0, %exit__4 ], [ %198, %exiting__5 ] + %191 = icmp sle i64 %190, %189 + br i1 %191, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %192 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %158, i64 %190) + %193 = bitcast i8* %192 to { %Array*, double }** + %194 = load { %Array*, double }*, { %Array*, double }** %193, align 8 + %195 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %194, i32 0, i32 0 + %196 = load %Array*, %Array** %195, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %196, i32 -1) + %197 = bitcast { %Array*, double }* %194 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %197, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %198 = add i64 %190, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %158, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQRSTerm____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* 
}, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %12 = bitcast i8* %11 to i64* + %__qsVar3__p__ = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %14 = bitcast i8* %13 to i64* + %__qsVar4__q__ = load i64, i64* %14, align 4 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 2) + %16 = bitcast i8* %15 to i64* + %__qsVar5__r__ = load i64, i64* %16, align 4 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 3) + %18 = bitcast i8* %17 to i64* + %__qsVar6__s__ = load i64, i64* %18, align 4 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %20 = bitcast i8* %19 to double* + %21 = load double, double* %20, align 8 + %22 = fmul double 1.250000e-01, %21 + %__qsVar7__angle__ = fmul double %22, %stepSize + %23 = icmp eq i64 %__qsVar3__p__, %__qsVar4__q__ + br i1 %23, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %entry + %24 = icmp eq i64 %__qsVar3__p__, %__qsVar5__r__ + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %entry + %25 = phi i1 [ %23, %entry ], [ %24, %condFalse__1 ] + br i1 %25, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %condContinue__1 + %26 = icmp eq i64 %__qsVar3__p__, %__qsVar6__s__ + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condContinue__1 + %27 = phi i1 [ %25, %condContinue__1 ], [ %26, %condFalse__2 ] + br i1 %27, label %condContinue__3, label %condFalse__3 + +condFalse__3: ; preds = %condContinue__2 + %28 = icmp eq i64 %__qsVar4__q__, %__qsVar5__r__ + br label %condContinue__3 + 
+condContinue__3: ; preds = %condFalse__3, %condContinue__2 + %29 = phi i1 [ %27, %condContinue__2 ], [ %28, %condFalse__3 ] + br i1 %29, label %condContinue__4, label %condFalse__4 + +condFalse__4: ; preds = %condContinue__3 + %30 = icmp eq i64 %__qsVar4__q__, %__qsVar6__s__ + br label %condContinue__4 + +condContinue__4: ; preds = %condFalse__4, %condContinue__3 + %31 = phi i1 [ %29, %condContinue__3 ], [ %30, %condFalse__4 ] + br i1 %31, label %condContinue__5, label %condFalse__5 + +condFalse__5: ; preds = %condContinue__4 + %32 = icmp eq i64 %__qsVar5__r__, %__qsVar6__s__ + br label %condContinue__5 + +condContinue__5: ; preds = %condFalse__5, %condContinue__4 + %33 = phi i1 [ %31, %condContinue__4 ], [ %32, %condFalse__5 ] + br i1 %33, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__5 + %34 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @22, i32 0, i32 0)) + %35 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar3__p__) + %36 = call %String* @__quantum__rt__string_concatenate(%String* %34, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %38 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %37) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %37, i32 -1) + %39 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar4__q__) + %40 = call %String* @__quantum__rt__string_concatenate(%String* %38, %String* %39) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %42 = call %String* @__quantum__rt__string_concatenate(%String* %40, %String* %41) + call void @__quantum__rt__string_update_reference_count(%String* %40, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %41, i32 -1) + %43 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar5__r__) + %44 = call %String* @__quantum__rt__string_concatenate(%String* %42, %String* %43) + call void @__quantum__rt__string_update_reference_count(%String* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %43, i32 -1) + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %46 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + %47 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar6__s__) + %48 = call %String* @__quantum__rt__string_concatenate(%String* %46, %String* %47) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + %49 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %50 = call %String* @__quantum__rt__string_concatenate(%String* %48, %String* %49) + call void 
@__quantum__rt__string_update_reference_count(%String* %48, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %49, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__fail(%String* %50) + unreachable + +continue__1: ; preds = %condContinue__5 + %51 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 0) + %53 = bitcast i8* %52 to i2* + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 1) + %55 = bitcast i8* %54 to i2* + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 2) + %57 = bitcast i8* %56 to i2* + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 3) + %59 = bitcast i8* %58 to i2* + store i2 -1, i2* %53, align 1 + store i2 -1, i2* %55, align 1 + store i2 1, i2* %57, align 1 + store i2 -1, i2* %59, align 1 + %60 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 0) + %62 = bitcast i8* %61 to i2* + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 1) + %64 = bitcast i8* %63 to i2* + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 2) + %66 = bitcast i8* %65 to i2* + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 3) + %68 = bitcast i8* %67 to i2* + store i2 1, i2* %62, align 1 + store i2 1, i2* %64, align 1 + store i2 1, i2* %66, align 1 + store i2 -1, i2* %68, align 1 + %69 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 0) + %71 = bitcast i8* %70 to i2* + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 1) + %73 = bitcast i8* %72 to i2* + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 2) + %75 = bitcast i8* %74 to i2* + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 3) + %77 = bitcast i8* %76 to i2* + store i2 1, i2* %71, align 1 + store i2 -1, i2* %73, align 1 + store i2 -1, i2* %75, align 1 + store i2 -1, i2* %77, align 1 + %78 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 0) + %80 = bitcast i8* %79 to i2* + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 1) + %82 = bitcast i8* %81 to i2* + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 2) + %84 = bitcast i8* %83 to i2* + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 3) + %86 = bitcast i8* %85 to i2* + store i2 -1, i2* %80, align 1 + store 
i2 1, i2* %82, align 1 + store i2 -1, i2* %84, align 1 + store i2 -1, i2* %86, align 1 + %87 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 0) + %89 = bitcast i8* %88 to i2* + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 1) + %91 = bitcast i8* %90 to i2* + %92 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 2) + %93 = bitcast i8* %92 to i2* + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %87, i64 3) + %95 = bitcast i8* %94 to i2* + store i2 1, i2* %89, align 1 + store i2 -1, i2* %91, align 1 + store i2 1, i2* %93, align 1 + store i2 1, i2* %95, align 1 + %96 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 0) + %98 = bitcast i8* %97 to i2* + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 1) + %100 = bitcast i8* %99 to i2* + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 2) + %102 = bitcast i8* %101 to i2* + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 3) + %104 = bitcast i8* %103 to i2* + store i2 -1, i2* %98, align 1 + store i2 1, i2* %100, align 1 + store i2 1, i2* %102, align 1 + store i2 1, i2* %104, align 1 + %105 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 0) + %107 = bitcast i8* %106 to i2* + %108 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 1) + %109 = bitcast i8* %108 to i2* + %110 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 2) + %111 = bitcast i8* %110 to i2* + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 3) + %113 = bitcast i8* %112 to i2* + store i2 -1, i2* %107, align 1 + store i2 -1, i2* %109, align 1 + store i2 -1, i2* %111, align 1 + store i2 1, i2* %113, align 1 + %114 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 0) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 1) + %118 = bitcast i8* %117 to i2* + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 2) + %120 = bitcast i8* %119 to i2* + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 3) + %122 = bitcast i8* %121 to i2* + store i2 1, i2* %116, align 1 + store i2 1, i2* %118, align 1 + store i2 -1, i2* %120, align 1 + store i2 1, i2* %122, align 1 + %__qsVar10__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 0) + %124 = bitcast i8* %123 to %Array** + %125 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 1) + %126 = bitcast i8* %125 to %Array** + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 2) + %128 = bitcast i8* %127 to %Array** + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 3) + %130 = bitcast i8* %129 to %Array** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 4) + %132 = bitcast i8* %131 to %Array** + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 5) + %134 = bitcast i8* %133 to %Array** + %135 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 6) + %136 = bitcast i8* %135 to %Array** + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 7) + %138 = bitcast i8* %137 to %Array** + store %Array* %51, %Array** %124, align 8 + store %Array* %60, %Array** %126, align 8 + store %Array* %69, %Array** %128, align 8 + store %Array* %78, %Array** %130, align 8 + store %Array* %87, %Array** %132, align 8 + store %Array* %96, %Array** %134, align 8 + store %Array* %105, %Array** %136, align 8 + store %Array* %114, %Array** %138, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %139 = phi i64 [ 0, %continue__1 ], [ %144, %exiting__1 ] + %140 = icmp sle i64 %139, 7 + br i1 %140, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %141 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %139) + %142 = bitcast i8* %141 to %Array** + %143 = load %Array*, %Array** %142, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %143, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %144 = add i64 %139, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__ops__, i32 1) + %145 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 0) + %147 = bitcast i8* %146 to i64* + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 1) + %149 = bitcast i8* %148 to i64* + %150 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 2) + %151 = bitcast i8* %150 to i64* + %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %145, i64 3) + %153 = bitcast i8* %152 to i64* + store i64 %__qsVar3__p__, i64* %147, align 4 + store i64 %__qsVar4__q__, i64* %149, align 4 + store i64 %__qsVar5__r__, i64* %151, align 4 + store i64 %__qsVar6__s__, i64* %153, align 4 + %154 = call { %Array*, %Array*, double }* @Microsoft__Quantum__Chemistry__JordanWigner___JordanWignerClusterOperatorPQRSTermSigns__body(%Array* %145) + %155 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 0 + %__qsVar11__sortedIndices__ = load %Array*, %Array** %155, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar11__sortedIndices__, i32 1) + %156 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 1 + %__qsVar12__signs__ = load %Array*, %Array** %156, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar12__signs__, i32 1) + %157 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %154, i32 0, i32 2 + %__qsVar13__globalSign__ = load double, double* %157, align 8 + %158 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %__qsVar10__ops__, %Array* %__qsVar12__signs__) + %159 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %__qsVar10__ops__, %Array* %__qsVar12__signs__) + %160 = call i64 @__quantum__rt__array_get_size_1d(%Array* %159) + %161 = sub i64 %160, 1 + %162 = insertvalue %Range zeroinitializer, i64 %161, 0 + %163 = insertvalue %Range %162, i64 -1, 1 + %164 = insertvalue %Range %163, i64 0, 2 + %165 = call %Array* @__quantum__rt__array_slice_1d(%Array* %158, %Range %164, i1 true) + %166 = call i64 
@__quantum__rt__array_get_size_1d(%Array* %165) + %167 = sub i64 %166, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %168 = phi i64 [ 0, %exit__1 ], [ %182, %exiting__2 ] + %169 = icmp sle i64 %168, %167 + br i1 %169, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %170 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 %168) + %171 = bitcast i8* %170 to { %Array*, double }** + %172 = load { %Array*, double }*, { %Array*, double }** %171, align 8 + %173 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %172, i32 0, i32 0 + %__qsVar14__op__ = load %Array*, %Array** %173, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar14__op__, i32 1) + %174 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %172, i32 0, i32 1 + %__qsVar15__sign__ = load double, double* %174, align 8 + %175 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %__qsVar16__pauliString__ = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %175, %Array* %__qsVar11__sortedIndices__, %Array* %__qsVar14__op__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 1) + %176 = fmul double %__qsVar13__globalSign__, %__qsVar15__sign__ + %theta = fmul double %176, %__qsVar7__angle__ + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar16__pauliString__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %177 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %178 = bitcast %Tuple* %177 to { %Array*, double, %Array* }* + %179 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %178, i32 0, i32 0 + %180 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %178, i32 0, i32 1 + %181 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %178, i32 0, i32 2 + store %Array* %__qsVar16__pauliString__, %Array** %179, align 8 + store double %theta, double* %180, align 8 + store %Array* %qubits, %Array** %181, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %178) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %177, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar14__op__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar16__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar16__pauliString__, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %182 = add i64 
%168, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %183 = phi i64 [ 0, %exit__2 ], [ %188, %exiting__3 ] + %184 = icmp sle i64 %183, 7 + br i1 %184, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %185 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %183) + %186 = bitcast i8* %185 to %Array** + %187 = load %Array*, %Array** %186, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %187, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %188 = add i64 %183, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__ops__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar11__sortedIndices__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar12__signs__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %189 = phi i64 [ 0, %exit__3 ], [ %194, %exiting__4 ] + %190 = icmp sle i64 %189, 7 + br i1 %190, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %191 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar10__ops__, i64 %189) + %192 = bitcast i8* %191 to %Array** + %193 = load %Array*, %Array** %192, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %193, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %194 = add i64 %189, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %145, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar11__sortedIndices__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar12__signs__, i32 -1) + %195 = bitcast { %Array*, %Array*, double }* %154 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %195, i32 -1) + %196 = call i64 @__quantum__rt__array_get_size_1d(%Array* %158) + %197 = sub i64 %196, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %198 = phi i64 [ 0, %exit__4 ], [ %206, %exiting__5 ] + %199 = icmp sle i64 %198, %197 + br i1 %199, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %200 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %158, i64 %198) + %201 = bitcast i8* %200 to { %Array*, double }** + %202 = load { %Array*, double }*, { %Array*, double }** %201, align 8 + %203 = getelementptr inbounds { %Array*, double 
}, { %Array*, double }* %202, i32 0, i32 0 + %204 = load %Array*, %Array** %203, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %204, i32 -1) + %205 = bitcast { %Array*, double }* %202 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %205, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %206 = add i64 %198, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %158, i32 -1) + %207 = sub i64 %160, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %208 = phi i64 [ 0, %exit__5 ], [ %216, %exiting__6 ] + %209 = icmp sle i64 %208, %207 + br i1 %209, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %210 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %159, i64 %208) + %211 = bitcast i8* %210 to { %Array*, double }** + %212 = load { %Array*, double }*, { %Array*, double }** %211, align 8 + %213 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %212, i32 0, i32 0 + %214 = load %Array*, %Array** %213, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %214, i32 -1) + %215 = bitcast { %Array*, double }* %212 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %215, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %216 = add i64 %208, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %159, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %165, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQTerm____body({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %coeff = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %8 = bitcast i8* %7 to i64* + %p = load i64, i64* %8, align 4 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, 
i64 1) + %10 = bitcast i8* %9 to i64* + %q = load i64, i64* %10, align 4 + %11 = icmp eq i64 %p, %q + br i1 %11, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([44 x i8], [44 x i8]* @25, i32 0, i32 0)) + %13 = call %String* @__quantum__rt__int_to_string(i64 %p) + %14 = call %String* @__quantum__rt__string_concatenate(%String* %12, %String* %13) + call void @__quantum__rt__string_update_reference_count(%String* %12, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + %15 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %16 = call %String* @__quantum__rt__string_concatenate(%String* %14, %String* %15) + call void @__quantum__rt__string_update_reference_count(%String* %14, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + %17 = call %String* @__quantum__rt__int_to_string(i64 %q) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %20) + unreachable + +continue__1: ; preds = %entry + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %22 = bitcast i8* %21 to double* + %23 = load double, double* %22, align 8 + %24 = fmul double 5.000000e-01, %23 + %angle = fmul double %24, %stepSize + %25 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 0) + %27 = bitcast i8* %26 to i2* + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 1) + %29 = bitcast i8* %28 to i2* + store i2 1, i2* %27, align 1 + store i2 -1, i2* %29, align 1 + %30 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 0) + %32 = bitcast i8* %31 to i2* + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 1) + %34 = bitcast i8* %33 to i2* + store i2 -1, i2* %32, align 1 + store i2 1, i2* %34, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %36 = bitcast i8* %35 to 
%Array** + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %38 = bitcast i8* %37 to %Array** + store %Array* %25, %Array** %36, align 8 + store %Array* %30, %Array** %38, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %39 = phi i64 [ 0, %continue__1 ], [ %44, %exiting__1 ] + %40 = icmp sle i64 %39, 1 + br i1 %40, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %39) + %42 = bitcast i8* %41 to %Array** + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %44 = add i64 %39, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %signs = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %signs, i64 0) + %46 = bitcast i8* %45 to double* + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %signs, i64 1) + %48 = bitcast i8* %47 to double* + store double 1.000000e+00, double* %46, align 8 + store double -1.000000e+00, double* %48, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 1) + %49 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %ops, %Array* %signs) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %52 = phi i64 [ 0, %exit__1 ], [ %60, %exiting__2 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { %Array*, double }** + %56 = load { %Array*, double }*, { %Array*, double }** %55, align 8 + %57 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %56, i32 0, i32 0 + %op = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %58 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %56, i32 0, i32 1 + %sign = load double, double* %58, align 8 + %59 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %pauliString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %59, %Array* %idxFermions, %Array* %op) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + %theta = fmul double %sign, %angle + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %pauliString, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %60 = add i64 %52, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* 
%idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %61 = phi i64 [ 0, %exit__2 ], [ %66, %exiting__3 ] + %62 = icmp sle i64 %61, 1 + br i1 %62, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %61) + %64 = bitcast i8* %63 to %Array** + %65 = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %66 = add i64 %61, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %67 = phi i64 [ 0, %exit__3 ], [ %72, %exiting__4 ] + %68 = icmp sle i64 %67, 1 + br i1 %68, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %67) + %70 = bitcast i8* %69 to %Array** + %71 = load %Array*, %Array** %70, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %71, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %72 = add i64 %67, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %signs, i32 -1) + %73 = sub i64 %50, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %74 = phi i64 [ 0, %exit__4 ], [ %82, %exiting__5 ] + %75 = icmp sle i64 %74, %73 + br i1 %75, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %74) + %77 = bitcast i8* %76 to { %Array*, double }** + %78 = load { %Array*, double }*, { %Array*, double }** %77, align 8 + %79 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %78, i32 0, i32 0 + %80 = load %Array*, %Array** %79, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %80, i32 -1) + %81 = bitcast { %Array*, double }* %78 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %81, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %82 = add i64 %74, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQTerm____adj({ { %Array*, %Array* }*, %Array* }* %term, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { 
%Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %8 = bitcast i8* %7 to i64* + %__qsVar3__p__ = load i64, i64* %8, align 4 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %10 = bitcast i8* %9 to i64* + %__qsVar4__q__ = load i64, i64* %10, align 4 + %11 = icmp eq i64 %__qsVar3__p__, %__qsVar4__q__ + br i1 %11, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([44 x i8], [44 x i8]* @25, i32 0, i32 0)) + %13 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar3__p__) + %14 = call %String* @__quantum__rt__string_concatenate(%String* %12, %String* %13) + call void @__quantum__rt__string_update_reference_count(%String* %12, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + %15 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %16 = call %String* @__quantum__rt__string_concatenate(%String* %14, %String* %15) + call void @__quantum__rt__string_update_reference_count(%String* %14, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + %17 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar4__q__) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* 
%__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__fail(%String* %20) + unreachable + +continue__1: ; preds = %entry + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %22 = bitcast i8* %21 to double* + %23 = load double, double* %22, align 8 + %24 = fmul double 5.000000e-01, %23 + %__qsVar5__angle__ = fmul double %24, %stepSize + %25 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 0) + %27 = bitcast i8* %26 to i2* + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 1) + %29 = bitcast i8* %28 to i2* + store i2 1, i2* %27, align 1 + store i2 -1, i2* %29, align 1 + %30 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 0) + %32 = bitcast i8* %31 to i2* + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %30, i64 1) + %34 = bitcast i8* %33 to i2* + store i2 -1, i2* %32, align 1 + store i2 1, i2* %34, align 1 + %__qsVar6__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 0) + %36 = bitcast i8* %35 to %Array** + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 1) + %38 = bitcast i8* %37 to %Array** + store %Array* %25, %Array** %36, align 8 + store %Array* %30, %Array** %38, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %39 = phi i64 [ 0, %continue__1 ], [ %44, %exiting__1 ] + %40 = icmp sle i64 %39, 1 + br i1 %40, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %39) + %42 = bitcast i8* %41 to %Array** + %43 = load %Array*, %Array** %42, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %44 = add i64 %39, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 1) + %__qsVar7__signs__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar7__signs__, i64 0) + %46 = bitcast i8* %45 to double* + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar7__signs__, i64 1) + %48 = bitcast i8* %47 to double* + store double 1.000000e+00, double* %46, align 8 + store double -1.000000e+00, double* %48, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__signs__, i32 1) + %49 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %__qsVar6__ops__, %Array* %__qsVar7__signs__) + %50 = call %Array* 
@Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %__qsVar6__ops__, %Array* %__qsVar7__signs__) + %51 = call i64 @__quantum__rt__array_get_size_1d(%Array* %50) + %52 = sub i64 %51, 1 + %53 = insertvalue %Range zeroinitializer, i64 %52, 0 + %54 = insertvalue %Range %53, i64 -1, 1 + %55 = insertvalue %Range %54, i64 0, 2 + %56 = call %Array* @__quantum__rt__array_slice_1d(%Array* %49, %Range %55, i1 true) + %57 = call i64 @__quantum__rt__array_get_size_1d(%Array* %56) + %58 = sub i64 %57, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %59 = phi i64 [ 0, %exit__1 ], [ %67, %exiting__2 ] + %60 = icmp sle i64 %59, %58 + br i1 %60, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 %59) + %62 = bitcast i8* %61 to { %Array*, double }** + %63 = load { %Array*, double }*, { %Array*, double }** %62, align 8 + %64 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %63, i32 0, i32 0 + %__qsVar8__op__ = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 1) + %65 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %63, i32 0, i32 1 + %__qsVar9__sign__ = load double, double* %65, align 8 + %66 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %__qsVar10__pauliString__ = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %66, %Array* %__qsVar2__idxFermions__, %Array* %__qsVar8__op__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 1) + %theta = fmul double %__qsVar9__sign__, %__qsVar5__angle__ + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %__qsVar10__pauliString__, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__pauliString__, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %67 = add i64 %59, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %68 = phi i64 [ 0, %exit__2 ], [ %73, %exiting__3 ] + %69 = icmp sle i64 %68, 1 
+ br i1 %69, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %68) + %71 = bitcast i8* %70 to %Array** + %72 = load %Array*, %Array** %71, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %73 = add i64 %68, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__signs__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %74 = phi i64 [ 0, %exit__3 ], [ %79, %exiting__4 ] + %75 = icmp sle i64 %74, 1 + br i1 %75, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %74) + %77 = bitcast i8* %76 to %Array** + %78 = load %Array*, %Array** %77, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %78, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %79 = add i64 %74, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar7__signs__, i32 -1) + %80 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %81 = sub i64 %80, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %82 = phi i64 [ 0, %exit__4 ], [ %90, %exiting__5 ] + %83 = icmp sle i64 %82, %81 + br i1 %83, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %82) + %85 = bitcast i8* %84 to { %Array*, double }** + %86 = load { %Array*, double }*, { %Array*, double }** %85, align 8 + %87 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %86, i32 0, i32 0 + %88 = load %Array*, %Array** %87, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %88, i32 -1) + %89 = bitcast { %Array*, double }* %86 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %89, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %90 = add i64 %82, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + %91 = sub i64 %51, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %92 = phi i64 [ 0, %exit__5 ], [ %100, %exiting__6 ] + %93 = icmp sle i64 %92, %91 + br i1 %93, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 %92) + %95 = bitcast i8* %94 to { %Array*, double }** + %96 = load { %Array*, double }*, { %Array*, double }** %95, align 8 + %97 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %96, i32 0, i32 0 + %98 = load %Array*, %Array** %97, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %98, i32 -1) + %99 = bitcast { %Array*, double }* %96 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %99, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %100 = add i64 %92, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %50, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %56, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQTerm____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %coeff = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 0) + %12 = bitcast i8* %11 to i64* + %p = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 1) + %14 = bitcast i8* %13 to i64* + %q = load i64, i64* %14, align 4 + %15 = icmp eq i64 %p, %q + br i1 %15, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %16 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([44 x i8], [44 x i8]* @25, i32 0, i32 0)) + %17 = call %String* @__quantum__rt__int_to_string(i64 %p) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %20 = call %String* 
@__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + %21 = call %String* @__quantum__rt__int_to_string(i64 %q) + %22 = call %String* @__quantum__rt__string_concatenate(%String* %20, %String* %21) + call void @__quantum__rt__string_update_reference_count(%String* %20, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %21, i32 -1) + %23 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %24 = call %String* @__quantum__rt__string_concatenate(%String* %22, %String* %23) + call void @__quantum__rt__string_update_reference_count(%String* %22, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %23, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %24) + unreachable + +continue__1: ; preds = %entry + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %26 = bitcast i8* %25 to double* + %27 = load double, double* %26, align 8 + %28 = fmul double 5.000000e-01, %27 + %angle = fmul double %28, %stepSize + %29 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %29, i64 0) + %31 = bitcast i8* %30 to i2* + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %29, i64 1) + %33 = bitcast i8* %32 to i2* + store i2 1, i2* %31, align 1 + store i2 -1, i2* %33, align 1 + %34 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 0) + %36 = bitcast i8* %35 to i2* + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 1) + %38 = bitcast i8* %37 to i2* + store i2 -1, i2* %36, align 1 + store i2 1, i2* %38, align 1 + %ops = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 0) + %40 = bitcast i8* %39 to %Array** + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 1) + %42 = bitcast i8* %41 to %Array** + store %Array* %29, %Array** %40, align 8 + store %Array* %34, %Array** %42, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %43 = phi i64 [ 0, %continue__1 ], [ %48, %exiting__1 ] + %44 = icmp sle i64 %43, 1 + br i1 %44, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %43) + %46 = bitcast i8* %45 to %Array** + %47 = load %Array*, %Array** %46, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %48 = add i64 %43, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + %signs = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %signs, i64 0) + %50 = bitcast i8* %49 to double* + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %signs, i64 1) + %52 = bitcast i8* %51 to double* + store double 1.000000e+00, double* %50, align 8 + store double -1.000000e+00, double* %52, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 1) + %53 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %ops, %Array* %signs) + %54 = call i64 @__quantum__rt__array_get_size_1d(%Array* %53) + %55 = sub i64 %54, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %56 = phi i64 [ 0, %exit__1 ], [ %69, %exiting__2 ] + %57 = icmp sle i64 %56, %55 + br i1 %57, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 %56) + %59 = bitcast i8* %58 to { %Array*, double }** + %60 = load { %Array*, double }*, { %Array*, double }** %59, align 8 + %61 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %60, i32 0, i32 0 + %op = load %Array*, %Array** %61, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %62 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %60, i32 0, i32 1 + %sign = load double, double* %62, align 8 + %63 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %pauliString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %63, %Array* %idxFermions, %Array* %op) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 1) + %theta = fmul double %sign, %angle + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %64 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %65 = bitcast %Tuple* %64 to { %Array*, double, %Array* }* + %66 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %65, i32 0, i32 0 + %67 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %65, i32 0, i32 1 + %68 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %65, i32 0, i32 2 + store %Array* %pauliString, %Array** %66, align 8 + store double %theta, double* %67, align 8 + store %Array* %qubits, %Array** %68, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %65) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* 
%qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %64, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauliString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %pauliString, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %69 = add i64 %56, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %70 = phi i64 [ 0, %exit__2 ], [ %75, %exiting__3 ] + %71 = icmp sle i64 %70, 1 + br i1 %71, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %70) + %73 = bitcast i8* %72 to %Array** + %74 = load %Array*, %Array** %73, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %74, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %75 = add i64 %70, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %signs, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %76 = phi i64 [ 0, %exit__3 ], [ %81, %exiting__4 ] + %77 = icmp sle i64 %76, 1 + br i1 %77, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %76) + %79 = bitcast i8* %78 to %Array** + %80 = load %Array*, %Array** %79, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %80, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %81 = add i64 %76, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %signs, i32 -1) + %82 = sub i64 %54, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %83 = phi i64 [ 0, %exit__4 ], [ %91, %exiting__5 ] + %84 = icmp sle i64 %83, %82 + br i1 %84, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 %83) + %86 = bitcast i8* %85 to { %Array*, double }** + %87 = load { %Array*, double }*, { %Array*, double }** %86, align 8 + %88 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %87, i32 0, i32 0 + %89 = load %Array*, %Array** %88, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %89, i32 -1) + %90 = bitcast { %Array*, double }* %87 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %90, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %91 = add i64 %83, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQTerm____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %term = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__coeff__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %term, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 0) + %12 = bitcast i8* %11 to i64* + %__qsVar3__p__ = load i64, i64* %12, align 4 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar2__idxFermions__, i64 1) + %14 = bitcast i8* %13 to i64* + %__qsVar4__q__ = load i64, i64* %14, align 4 + %15 = icmp eq i64 %__qsVar3__p__, %__qsVar4__q__ + br i1 %15, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %16 = call %String* @__quantum__rt__string_create(i8* 
getelementptr inbounds ([44 x i8], [44 x i8]* @25, i32 0, i32 0)) + %17 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar3__p__) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @23, i32 0, i32 0)) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + %21 = call %String* @__quantum__rt__int_to_string(i64 %__qsVar4__q__) + %22 = call %String* @__quantum__rt__string_concatenate(%String* %20, %String* %21) + call void @__quantum__rt__string_update_reference_count(%String* %20, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %21, i32 -1) + %23 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @24, i32 0, i32 0)) + %24 = call %String* @__quantum__rt__string_concatenate(%String* %22, %String* %23) + call void @__quantum__rt__string_update_reference_count(%String* %22, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %23, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__fail(%String* %24) + unreachable + +continue__1: ; preds = %entry + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coeff__, i64 0) + %26 = bitcast i8* %25 to double* + %27 = load double, double* %26, align 8 + %28 = fmul double 5.000000e-01, %27 + %__qsVar5__angle__ = fmul double %28, %stepSize + %29 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %29, i64 0) + %31 = bitcast i8* %30 to i2* + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %29, i64 1) + %33 = bitcast i8* %32 to i2* + store i2 1, i2* %31, align 1 + store i2 -1, i2* %33, align 1 + %34 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 0) + %36 = bitcast i8* %35 to i2* + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 1) + %38 = bitcast i8* %37 to i2* + store i2 -1, i2* %36, align 1 + store i2 1, i2* %38, align 1 + %__qsVar6__ops__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 0) + %40 = bitcast i8* %39 to 
%Array** + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 1) + %42 = bitcast i8* %41 to %Array** + store %Array* %29, %Array** %40, align 8 + store %Array* %34, %Array** %42, align 8 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %43 = phi i64 [ 0, %continue__1 ], [ %48, %exiting__1 ] + %44 = icmp sle i64 %43, 1 + br i1 %44, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %43) + %46 = bitcast i8* %45 to %Array** + %47 = load %Array*, %Array** %46, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %48 = add i64 %43, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 1) + %__qsVar7__signs__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar7__signs__, i64 0) + %50 = bitcast i8* %49 to double* + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar7__signs__, i64 1) + %52 = bitcast i8* %51 to double* + store double 1.000000e+00, double* %50, align 8 + store double -1.000000e+00, double* %52, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__signs__, i32 1) + %53 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %__qsVar6__ops__, %Array* %__qsVar7__signs__) + %54 = call %Array* @Microsoft__Quantum__Arrays___597ff22267984389a657a06cca0d63b9_Zipped__body(%Array* %__qsVar6__ops__, %Array* %__qsVar7__signs__) + %55 = call i64 @__quantum__rt__array_get_size_1d(%Array* %54) + %56 = sub i64 %55, 1 + %57 = insertvalue %Range zeroinitializer, i64 %56, 0 + %58 = insertvalue %Range %57, i64 -1, 1 + %59 = insertvalue %Range %58, i64 0, 2 + %60 = call %Array* @__quantum__rt__array_slice_1d(%Array* %53, %Range %59, i1 true) + %61 = call i64 @__quantum__rt__array_get_size_1d(%Array* %60) + %62 = sub i64 %61, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %63 = phi i64 [ 0, %exit__1 ], [ %76, %exiting__2 ] + %64 = icmp sle i64 %63, %62 + br i1 %64, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 %63) + %66 = bitcast i8* %65 to { %Array*, double }** + %67 = load { %Array*, double }*, { %Array*, double }** %66, align 8 + %68 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %67, i32 0, i32 0 + %__qsVar8__op__ = load %Array*, %Array** %68, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 1) + %69 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %67, i32 0, i32 1 + %__qsVar9__sign__ = load double, double* %69, align 8 + %70 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %__qsVar10__pauliString__ = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliString__body(i64 %70, %Array* %__qsVar2__idxFermions__, %Array* %__qsVar8__op__) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 1) + %theta = fmul double %__qsVar9__sign__, 
%__qsVar5__angle__ + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__pauliString__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %71 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %72 = bitcast %Tuple* %71 to { %Array*, double, %Array* }* + %73 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %72, i32 0, i32 0 + %74 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %72, i32 0, i32 1 + %75 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %72, i32 0, i32 2 + store %Array* %__qsVar10__pauliString__, %Array** %73, align 8 + store double %theta, double* %74, align 8 + store %Array* %qubits, %Array** %75, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %72) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %71, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar8__op__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar10__pauliString__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar10__pauliString__, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %76 = add i64 %63, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coeff__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %77 = phi i64 [ 0, %exit__2 ], [ %82, %exiting__3 ] + %78 = icmp sle i64 %77, 1 + br i1 %78, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %77) + %80 = bitcast i8* %79 to %Array** + %81 = load %Array*, %Array** %80, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %82 = add i64 %77, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar7__signs__, i32 -1) + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %83 = phi i64 [ 0, %exit__3 ], [ %88, %exiting__4 ] + %84 = icmp sle i64 %83, 1 + br i1 %84, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar6__ops__, i64 %83) + %86 = bitcast i8* %85 to %Array** + %87 = load %Array*, %Array** %86, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %87, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %88 = add i64 %83, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar6__ops__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar7__signs__, i32 -1) + %89 = call i64 @__quantum__rt__array_get_size_1d(%Array* %53) + %90 = sub i64 %89, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %91 = phi i64 [ 0, %exit__4 ], [ %99, %exiting__5 ] + %92 = icmp sle i64 %91, %90 + br i1 %92, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 %91) + %94 = bitcast i8* %93 to { %Array*, double }** + %95 = load { %Array*, double }*, { %Array*, double }** %94, align 8 + %96 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %95, i32 0, i32 0 + %97 = load %Array*, %Array** %96, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %97, i32 -1) + %98 = bitcast { %Array*, double }* %95 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %98, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %99 = add i64 %91, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1) + %100 = sub i64 %55, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %101 = phi i64 [ 0, %exit__5 ], [ %109, %exiting__6 ] + %102 = icmp sle i64 %101, %100 + br i1 %102, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %54, i64 %101) + %104 = bitcast i8* %103 to { %Array*, double }** + %105 = load { %Array*, double }*, { %Array*, double }** %104, align 8 + %106 = getelementptr inbounds { %Array*, double }, { %Array*, double }* %105, i32 0, i32 0 + %107 = load %Array*, %Array** %106, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %107, i32 -1) + %108 = bitcast { %Array*, double }* %105 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %108, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %109 = add i64 %101, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %54, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + ret void +} + +define internal { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorFunction____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 
+ %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + %6 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %9 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { { %Array*, %Array* }*, %Array* }* }* getelementptr ({ %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %13 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %12, i32 0, i32 1 + store %Callable* %10, %Callable** %13, align 8 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %14, align 8 + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__36__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__21__FunctionTable, %Tuple* %11) + %16 = call { %Callable* }* @Microsoft__Quantum__Simulation__EvolutionUnitary__body(%Callable* %15) + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret { %Callable* }* %16 +} + +define internal void @Lifted__PartialApplication__36__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, 
%Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %2 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %4 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 0 + %5 = load double, double* %4, align 8 + %6 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %2, { { %Array*, %Array* }*, %Array* }** %10, align 8 + store double %5, double* %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__36__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %2 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %4 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 0 + %5 = load double, double* %4, align 8 + %6 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %3, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, 
%Array* }*, double, %Array* }* %9, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %2, { { %Array*, %Array* }*, %Array* }** %10, align 8 + store double %5, double* %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__36__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 1 + %7 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %6, align 8 + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %9 = load double, double* %8, align 8 + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %7, { { %Array*, %Array* }*, %Array* }** %14, align 8 + store double %9, double* %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* getelementptr ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* null, i32 1) to i64)) + %18 = 
bitcast %Tuple* %17 to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__36__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array* }* }, { %Array*, { double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array* }*, { double, %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %6 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 1 + %7 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %6, align 8 + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 0 + %9 = load double, double* %8, align 8 + %10 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %4, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %14 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, 
i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %7, { { %Array*, %Array* }*, %Array* }** %14, align 8 + store double %9, double* %15, align 8 + store %Array* %11, %Array** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* getelementptr ({ %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %19 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %13, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %5, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %23) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____body({ { %Array*, %Array* }*, %Array* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { { %Array*, %Array* }*, %Array* 
}*, double, %Array* }* + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %5 = load double, double* %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____adj({ { %Array*, %Array* }*, %Array* }* %4, double %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Array*, %Array* }*, %Array* }*, double, %Array* }*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____ctl(%Array* %3, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }, { %Array*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { { %Array*, %Array* }*, %Array* }*, double, %Array* }*, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____ctladj(%Array* %3, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__21__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %3, align 8 + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 0 + %6 = load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 %count-change) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 %count-change) + %11 = bitcast { %Array*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 %count-change) + %12 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 %count-change) + %14 = bitcast { { %Array*, %Array* }*, %Array* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__21__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { { %Array*, %Array* }*, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { { %Array*, %Array* }*, %Array* }* }, { %Callable*, { { %Array*, %Array* }*, %Array* }* }* %0, i32 0, i32 1 + %4 = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %3, align 8 + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 0 + %6 = load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 %count-change) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 %count-change) + %11 = bitcast { %Array*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 %count-change) + %12 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %4, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 %count-change) + %14 = bitcast { { %Array*, %Array* }*, %Array* }* %4 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %idxDoubles = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxTermType, i64 0) + %8 = bitcast i8* %7 to i64* + %termType = load i64, i64* %8, align 4 + %9 = icmp eq i64 %termType, 0 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQTerm____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +test1__1: ; preds = %entry + %10 = icmp eq i64 %termType, 2 + br i1 %10, label %then1__1, label %continue__1 + +then1__1: ; preds = %test1__1 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQRSTerm____body({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +continue__1: ; preds = %then1__1, %test1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 
-1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____adj({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %1 = load { %Array*, %Array* }*, { %Array*, %Array* }** %0, align 8 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %1, i32 0, i32 1 + %__qsVar1__idxDoubles__ = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + %4 = bitcast { %Array*, %Array* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %6 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__idxTermType__, i64 0) + %8 = bitcast i8* %7 to i64* + %__qsVar3__termType__ = load i64, i64* %8, align 4 + %9 = icmp eq i64 %__qsVar3__termType__, 0 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQTerm____adj({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +test1__1: ; preds = %entry + %10 = icmp eq i64 %__qsVar3__termType__, 2 + br i1 %10, label %then1__1, label %continue__1 + +then1__1: ; preds = %test1__1 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQRSTerm____adj({ { %Array*, %Array* }*, %Array* }* %generatorIndex, double %stepSize, %Array* %qubits) + br label %continue__1 + +continue__1: ; preds = %then1__1, %test1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %idxTermType = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %idxDoubles = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxTermType, i64 0) + %12 = bitcast i8* %11 to i64* + %termType = load i64, i64* %12, align 4 + %13 = icmp eq i64 %termType, 0 + br i1 %13, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, 
double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %16, align 8 + store double %stepSize, double* %17, align 8 + store %Array* %qubits, %Array** %18, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQTerm____ctl(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %19 = icmp eq i64 %termType, 2 + br i1 %19, label %then1__1, label %continue__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %22 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %22, align 8 + store double %stepSize, double* %23, align 8 + store %Array* %qubits, %Array** %24, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQRSTerm____ctl(%Array* %__controlQubits__, { { { %Array*, 
%Array* }*, %Array* }*, double, %Array* }* %21) + call void @__quantum__rt__array_update_reference_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then1__1, %test1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxTermType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxDoubles, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorImpl____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 0 + %generatorIndex = load { { %Array*, %Array* }*, %Array* }*, { { %Array*, %Array* }*, %Array* }** %1, align 8 + %2 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 0 + %3 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 0 + %__qsVar0__idxTermType__ = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %3, i32 0, i32 1 + %__qsVar1__idxDoubles__ = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + %6 = bitcast { %Array*, %Array* }* %3 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + %7 = getelementptr inbounds { { %Array*, %Array* }*, %Array* }, { { %Array*, %Array* }*, %Array* }* %generatorIndex, i32 0, i32 1 + %__qsVar2__idxFermions__ = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %8 = bitcast { { %Array*, %Array* }*, %Array* }* %generatorIndex to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 1 + %stepSize = load 
double, double* %9, align 8 + %10 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__idxTermType__, i64 0) + %12 = bitcast i8* %11 to i64* + %__qsVar3__termType__ = load i64, i64* %12, align 4 + %13 = icmp eq i64 %__qsVar3__termType__, 0 + br i1 %13, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %16 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %16, align 8 + store double %stepSize, double* %17, align 8 + store %Array* %qubits, %Array** %18, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQTerm____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %15) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %19 = icmp eq i64 %__qsVar3__termType__, 2 + br i1 %19, label %then1__1, label %continue__1 + +then1__1: ; preds = %test1__1 + call void 
@__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }* getelementptr ({ { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* + %22 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }*, double, %Array* }, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21, i32 0, i32 2 + store { { %Array*, %Array* }*, %Array* }* %generatorIndex, { { %Array*, %Array* }*, %Array* }** %22, align 8 + store double %stepSize, double* %23, align 8 + store %Array* %qubits, %Array** %24, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___ApplyJordanWignerClusterOperatorPQRSTerm____ctladj(%Array* %__controlQubits__, { { { %Array*, %Array* }*, %Array* }*, double, %Array* }* %21) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then1__1, %test1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__idxTermType__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__idxDoubles__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__idxFermions__, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___PrepareSingleConfigurationalStateSingleSiteOccupation____body(%Array* %qubitIndices) { 
+entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %qubitIndices, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Array* }* + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %2, i32 0, i32 1 + store %Callable* %0, %Callable** %3, align 8 + store %Array* %qubitIndices, %Array** %4, align 8 + %5 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__37__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__22__FunctionTable, %Tuple* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + ret %Callable* %5 +} + +define internal void @Lifted__PartialApplication__37__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__37__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, 
%Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__37__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__37__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = 
bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, 
%Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__adj(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctl(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctladj(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__22__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__22__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + 
call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body(%Array* %qubitIndices, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___86c36a8a845246bfb23f44646c7e9d24_Subarray__body(%Array* %qubitIndices, %Array* %qubits) + call void @Microsoft__Quantum__Canon___c2c9de63210e4d19860498010c1645f5_ApplyToEachCA__body(%Callable* %0, %Array* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__adj(%Array* %qubitIndices, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___86c36a8a845246bfb23f44646c7e9d24_Subarray__body(%Array* %qubitIndices, %Array* %qubits) + call void @Microsoft__Quantum__Canon___c2c9de63210e4d19860498010c1645f5_ApplyToEachCA__adj(%Callable* %0, %Array* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctl(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %qubitIndices = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Array* @Microsoft__Quantum__Arrays___86c36a8a845246bfb23f44646c7e9d24_Subarray__body(%Array* 
%qubitIndices, %Array* %qubits) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + store %Callable* %3, %Callable** %7, align 8 + store %Array* %4, %Array** %8, align 8 + call void @Microsoft__Quantum__Canon___c2c9de63210e4d19860498010c1645f5_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %6) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__ctladj(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %qubitIndices = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Array* @Microsoft__Quantum__Arrays___86c36a8a845246bfb23f44646c7e9d24_Subarray__body(%Array* %qubitIndices, %Array* %qubits) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + store %Callable* %3, %Callable** %7, align 8 + store %Array* %4, %Array** %8, align 8 + call void @Microsoft__Quantum__Canon___c2c9de63210e4d19860498010c1645f5_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Array* }* %6) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3__JordanWignerStateAsGeneratorIndex____body(%Array* %data, i64 %idx) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %data) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %2) + %5 = bitcast i8* %4 to { { double, double }*, %Array* }** + %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0 + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %idx) + %15 = bitcast i8* %14 to { { double, double }*, %Array* }** + %16 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %15, align 8 + %17 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %16, i32 0, i32 0 + %18 = load { double, double }*, { double, double }** %17, align 8 + %19 = getelementptr inbounds { double, double }, { double, double }* %18, i32 0, i32 0 + %real = load double, double* %19, align 8 + %20 = getelementptr inbounds { double, double }, { double, double }* %18, i32 0, i32 1 + %imaginary = load double, double* %20, align 8 + %21 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %16, i32 0, i32 1 + %idxFermions = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %22 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions) + %23 = icmp eq i64 %22, 2 + br i1 %23, label %then0__1, label %test1__1 + +then0__1: ; preds = %exit__1 + %24 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %24, i64 0) + %26 = bitcast i8* %25 to i64* + store i64 0, i64* %26, align 4 + %27 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %27, i64 0) + %29 = bitcast i8* %28 to double* + store double %real, double* %29, align 8 + %30 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %31 = bitcast %Tuple* %30 to { %Array*, %Array* }* + %32 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %31, i32 0, 
i32 0 + %33 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %31, i32 0, i32 1 + store %Array* %24, %Array** %32, align 8 + store %Array* %27, %Array** %33, align 8 + %34 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %31, %Array* %idxFermions) + %35 = sub i64 %0, 1 + br label %header__2 + +test1__1: ; preds = %exit__1 + %36 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions) + %37 = icmp eq i64 %36, 4 + br i1 %37, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + %38 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 0) + %40 = bitcast i8* %39 to i64* + store i64 2, i64* %40, align 4 + %41 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %41, i64 0) + %43 = bitcast i8* %42 to double* + store double %real, double* %43, align 8 + %44 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %45 = bitcast %Tuple* %44 to { %Array*, %Array* }* + %46 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %45, i32 0, i32 0 + %47 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %45, i32 0, i32 1 + store %Array* %38, %Array** %46, align 8 + store %Array* %41, %Array** %47, align 8 + %48 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %45, %Array* %idxFermions) + %49 = sub i64 %0, 1 + br label %header__3 + +else__1: ; preds = %test1__1 + %50 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 0) + %52 = bitcast i8* %51 to i64* + store i64 -1, i64* %52, align 4 + %53 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 0) + %55 = bitcast i8* %54 to double* + store double 0.000000e+00, double* %55, align 8 + %56 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %57 = bitcast %Tuple* %56 to { %Array*, %Array* }* + %58 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %57, i32 0, i32 0 + %59 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %57, i32 0, i32 1 + store %Array* %50, %Array** %58, align 8 + store %Array* %53, %Array** %59, align 8 + %60 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 0) + %62 = bitcast i8* %61 to i64* + store i64 0, i64* %62, align 4 + %63 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %57, %Array* %60) + %64 = sub i64 %0, 1 + br label %header__4 + +continue__1: ; No predecessors! 
+ unreachable + +header__2: ; preds = %exiting__2, %then0__1 + %65 = phi i64 [ 0, %then0__1 ], [ %76, %exiting__2 ] + %66 = icmp sle i64 %65, %35 + br i1 %66, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %65) + %68 = bitcast i8* %67 to { { double, double }*, %Array* }** + %69 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %68, align 8 + %70 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %69, i32 0, i32 0 + %71 = load { double, double }*, { double, double }** %70, align 8 + %72 = bitcast { double, double }* %71 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %72, i32 -1) + %73 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %69, i32 0, i32 1 + %74 = load %Array*, %Array** %73, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %74, i32 -1) + %75 = bitcast { { double, double }*, %Array* }* %69 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %75, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %76 = add i64 %65, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %34 + +header__3: ; preds = %exiting__3, %then1__1 + %77 = phi i64 [ 0, %then1__1 ], [ %88, %exiting__3 ] + %78 = icmp sle i64 %77, %49 + br i1 %78, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %77) + %80 = bitcast i8* %79 to { { double, double }*, %Array* }** + %81 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %80, align 8 + %82 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %81, i32 0, i32 0 + %83 = load { double, double }*, { double, double }** %82, align 8 + %84 = bitcast { double, double }* %83 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %84, i32 -1) + %85 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %81, i32 0, i32 1 + %86 = load %Array*, %Array** %85, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %86, i32 -1) + %87 = bitcast { { double, double }*, %Array* }* %81 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %87, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %88 = add i64 %77, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %38, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %44, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %48 + +header__4: ; preds = %exiting__4, %else__1 + %89 = phi i64 [ 0, %else__1 ], [ %100, %exiting__4 ] + %90 = icmp sle i64 %89, 
%64 + br i1 %90, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %89) + %92 = bitcast i8* %91 to { { double, double }*, %Array* }** + %93 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %92, align 8 + %94 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %93, i32 0, i32 0 + %95 = load { double, double }*, { double, double }** %94, align 8 + %96 = bitcast { double, double }* %95 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %96, i32 -1) + %97 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %93, i32 0, i32 1 + %98 = load %Array*, %Array** %97, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %98, i32 -1) + %99 = bitcast { { double, double }*, %Array* }* %93 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %99, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %100 = add i64 %89, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %50, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %63 +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerBitString__body(i64 %nFermions, %Array* %idxFermions) { +entry: + %zString = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions) + %1 = srem i64 %0, 2 + %2 = icmp ne i64 %1, 0 + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([86 x i8], [86 x i8]* @26, i32 0, i32 0)) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__fail(%String* %3) + unreachable + +continue__1: ; preds = %entry + %4 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %nFermions) + %5 = sub i64 %nFermions, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %6 = phi i64 [ 0, %continue__1 ], [ %10, %exiting__1 ] + %7 = icmp sle i64 %6, %5 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %6) + %9 = bitcast i8* %8 to i1* + store i1 false, i1* %9, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %6, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %4, %Array** %zString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %idxFermions) + %12 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %24, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%idxFermions, i64 %13) + %16 = bitcast i8* %15 to i64* + %fermionIdx = load i64, i64* %16, align 4 + %17 = icmp sge i64 %fermionIdx, %nFermions + br i1 %17, label %then0__2, label %continue__2 + +then0__2: ; preds = %body__2 + %18 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([46 x i8], [46 x i8]* @27, i32 0, i32 0)) + %19 = call %String* @__quantum__rt__int_to_string(i64 %fermionIdx) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + %21 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @28, i32 0, i32 0)) + %22 = call %String* @__quantum__rt__string_concatenate(%String* %20, %String* %21) + call void @__quantum__rt__string_update_reference_count(%String* %20, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %21, i32 -1) + %23 = load %Array*, %Array** %zString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__fail(%String* %22) + unreachable + +continue__2: ; preds = %body__2 + br label %header__3 + +exiting__2: ; preds = %exit__3 + %24 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %25 = sub i64 %11, 1 + br label %header__4 + +header__3: ; preds = %exiting__3, %continue__2 + %idx = phi i64 [ 0, %continue__2 ], [ %35, %exiting__3 ] + %26 = icmp sle i64 %idx, %fermionIdx + br i1 %26, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %27 = load %Array*, %Array** %zString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1) + %28 = call %Array* @__quantum__rt__array_copy(%Array* %27, i1 false) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %27, i64 %idx) + %30 = bitcast i8* %29 to i1* + %31 = load i1, i1* %30, align 1 + %32 = xor i1 %31, true + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 %idx) + %34 = bitcast i8* %33 to i1* + store i1 %32, i1* %34, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + store %Array* %28, %Array** %zString, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %idx, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + br label %exiting__2 + +header__4: ; preds = %exiting__4, %exit__2 + %36 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__4 ] + %37 = icmp sle i64 %36, %25 + br i1 %37, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %idxFermions, i64 %36) + %39 = bitcast i8* %38 to i64* + %fermionIdx__1 = load i64, i64* %39, align 4 + %40 = load %Array*, %Array** %zString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %40, i32 -1) + %41 = call %Array* @__quantum__rt__array_copy(%Array* %40, i1 false) + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %41, i64 %fermionIdx__1) + %43 = bitcast i8* %42 to i1* + store i1 false, i1* %43, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1) + store %Array* %41, %Array** %zString, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %40, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %44 = add i64 %36, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + %45 = load %Array*, %Array** %zString, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %45, i32 -1) + ret %Array* %45 +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerPauliZString__body(i64 %nFermions, %Array* %idxFermions) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %bitString = call %Array* @Microsoft__Quantum__Chemistry__JordanWigner___ComputeJordanWignerBitString__body(i64 %nFermions, %Array* %idxFermions) + call void @__quantum__rt__array_update_alias_count(%Array* %bitString, i32 1) + %0 = call %Array* @Microsoft__Quantum__Convert__BoolArrayAsPauli__body(i2 -2, i1 true, %Array* %bitString) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bitString, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bitString, i32 -1) + ret %Array* %0 +} + +define internal { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerClusterOperatorEvolutionSet__body() { +entry: + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorFunction____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call { %Callable* }* @Microsoft__Quantum__Simulation__EvolutionSet__body(%Callable* %0) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + ret { %Callable* }* %1 +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorFunction____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { %Array*, %Array* }*, %Array* }* + %1 = call { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___JordanWignerClusterOperatorFunction____body({ { %Array*, %Array* }*, %Array* }* %0) + %2 = bitcast %Tuple* %result-tuple to { { %Callable* }* }* + %3 = getelementptr inbounds { { %Callable* }* }, { { %Callable* }* }* %2, i32 0, i32 0 + store { %Callable* }* %1, { %Callable* }** %3, align 8 + ret void +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerClusterOperatorGeneratorSystem__body(%Array* %data) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %data) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %2) + %5 = bitcast i8* %4 to { { double, double }*, %Array* }** + %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0 + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to 
%Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + %14 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3__JordanWignerStateAsGeneratorIndex____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %16) + %19 = bitcast i8* %18 to { { double, double }*, %Array* }** + %20 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %19, align 8 + %21 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %20, i32 0, i32 0 + %22 = load { double, double }*, { double, double }** %21, align 8 + %23 = bitcast { double, double }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 1) + %24 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %20, i32 0, i32 1 + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 1) + %26 = bitcast { { double, double }*, %Array* }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %data, i32 1) + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { %Callable*, %Array* }* + %30 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %29, i32 0, i32 1 + store %Callable* %14, %Callable** %30, align 8 + store %Array* %data, %Array** %31, align 8 + %32 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__38__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__23__FunctionTable, %Tuple* %28) + %33 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 %0, %Callable* %32) + %34 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %46, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %35) + %38 = bitcast i8* %37 to { { double, double }*, %Array* 
}** + %39 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %38, align 8 + %40 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %39, i32 0, i32 0 + %41 = load { double, double }*, { double, double }** %40, align 8 + %42 = bitcast { double, double }* %41 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 -1) + %43 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %39, i32 0, i32 1 + %44 = load %Array*, %Array** %43, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 -1) + %45 = bitcast { { double, double }*, %Array* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %46 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %32, i32 -1) + ret { i64, %Callable* }* %33 +} + +define internal void @Lifted__PartialApplication__38__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { i64 }* + %4 = getelementptr inbounds { i64 }, { i64 }* %3, i32 0, i32 0 + %5 = load i64, i64* %4, align 4 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, i64 }* getelementptr ({ %Array*, i64 }, { %Array*, i64 }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, i64 }* + %8 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store i64 %5, i64* %9, align 4 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3__JordanWignerStateAsGeneratorIndex____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, i64 }* + %1 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load i64, i64* %2, align 4 + %5 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3__JordanWignerStateAsGeneratorIndex____body(%Array* %3, i64 %4) + %6 = bitcast %Tuple* %result-tuple to { { { %Array*, %Array* }*, %Array* }* }* + %7 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %6, i32 0, i32 0 + store { { %Array*, %Array* }*, %Array* }* %5, { { %Array*, %Array* }*, %Array* }** %7, align 8 + ret void +} + +define internal void @MemoryManagement__23__RefCount(%Tuple* %capture-tuple, i32 
%count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { { double, double }*, %Array* }** + %11 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 0 + %13 = load { double, double }*, { double, double }** %12, align 8 + %14 = bitcast { double, double }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change) + %15 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 1 + %16 = load %Array*, %Array** %15, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 %count-change) + %17 = bitcast { { double, double }*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__23__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { { double, double }*, %Array* }** + %11 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 0 + %13 = load { double, double }*, { double, double }** %12, align 8 + %14 = bitcast { double, 
double }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change) + %15 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 1 + %16 = load %Array*, %Array** %15, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %16, i32 %count-change) + %17 = bitcast { { double, double }*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerGeneratorSystem__body({ %Array*, %Array*, %Array*, %Array* }* %data) { +entry: + %0 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 0 + %ZData = load %Array*, %Array** %0, align 8 + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ZData) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %3) + %6 = bitcast i8* %5 to { %Array*, %Array* }** + %7 = load { %Array*, %Array* }*, { %Array*, %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { %Array*, %Array* }* %7 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 1) + %14 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 1 + %ZZData = load %Array*, %Array** %14, align 8 + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ZZData) + %16 = sub i64 %15, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %17 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %17) + %20 = bitcast i8* %19 to { %Array*, %Array* }** + %21 = load { %Array*, %Array* }*, { %Array*, %Array* }** %20, align 8 + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 0 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %24 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %21, i32 0, i32 1 + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* 
%25, i32 1) + %26 = bitcast { %Array*, %Array* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %17, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 1) + %28 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 2 + %PQandPQQRData = load %Array*, %Array** %28, align 8 + %29 = call i64 @__quantum__rt__array_get_size_1d(%Array* %PQandPQQRData) + %30 = sub i64 %29, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %31 = phi i64 [ 0, %exit__2 ], [ %41, %exiting__3 ] + %32 = icmp sle i64 %31, %30 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %31) + %34 = bitcast i8* %33 to { %Array*, %Array* }** + %35 = load { %Array*, %Array* }*, { %Array*, %Array* }** %34, align 8 + %36 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %35, i32 0, i32 0 + %37 = load %Array*, %Array** %36, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %37, i32 1) + %38 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %35, i32 0, i32 1 + %39 = load %Array*, %Array** %38, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %39, i32 1) + %40 = bitcast { %Array*, %Array* }* %35 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %41 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %PQandPQQRData, i32 1) + %42 = getelementptr inbounds { %Array*, %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array*, %Array* }* %data, i32 0, i32 3 + %h0123Data = load %Array*, %Array** %42, align 8 + %43 = call i64 @__quantum__rt__array_get_size_1d(%Array* %h0123Data) + %44 = sub i64 %43, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %45 = phi i64 [ 0, %exit__3 ], [ %55, %exiting__4 ] + %46 = icmp sle i64 %45, %44 + br i1 %46, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %45) + %48 = bitcast i8* %47 to { %Array*, %Array* }** + %49 = load { %Array*, %Array* }*, { %Array*, %Array* }** %48, align 8 + %50 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %49, i32 0, i32 0 + %51 = load %Array*, %Array** %50, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %51, i32 1) + %52 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %49, i32 0, i32 1 + %53 = load %Array*, %Array** %52, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %53, i32 1) + %54 = bitcast { %Array*, %Array* }* %49 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %54, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %55 = add i64 %45, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 1) + %56 = bitcast { %Array*, %Array*, %Array*, %Array* }* %data to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 1) + %57 = sub i64 %1, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + 
%58 = phi i64 [ 0, %exit__4 ], [ %68, %exiting__5 ] + %59 = icmp sle i64 %58, %57 + br i1 %59, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %58) + %61 = bitcast i8* %60 to { %Array*, %Array* }** + %62 = load { %Array*, %Array* }*, { %Array*, %Array* }** %61, align 8 + %63 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %62, i32 0, i32 0 + %64 = load %Array*, %Array** %63, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + %65 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %62, i32 0, i32 1 + %66 = load %Array*, %Array** %65, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 1) + %67 = bitcast { %Array*, %Array* }* %62 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %67, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %68 = add i64 %58, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 1) + %69 = sub i64 %15, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %70 = phi i64 [ 0, %exit__5 ], [ %80, %exiting__6 ] + %71 = icmp sle i64 %70, %69 + br i1 %71, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %70) + %73 = bitcast i8* %72 to { %Array*, %Array* }** + %74 = load { %Array*, %Array* }*, { %Array*, %Array* }** %73, align 8 + %75 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %74, i32 0, i32 0 + %76 = load %Array*, %Array** %75, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 1) + %77 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %74, i32 0, i32 1 + %78 = load %Array*, %Array** %77, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %78, i32 1) + %79 = bitcast { %Array*, %Array* }* %74 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %80 = add i64 %70, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 1) + %81 = sub i64 %29, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %82 = phi i64 [ 0, %exit__6 ], [ %92, %exiting__7 ] + %83 = icmp sle i64 %82, %81 + br i1 %83, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %82) + %85 = bitcast i8* %84 to { %Array*, %Array* }** + %86 = load { %Array*, %Array* }*, { %Array*, %Array* }** %85, align 8 + %87 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %86, i32 0, i32 0 + %88 = load %Array*, %Array** %87, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %88, i32 1) + %89 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %86, i32 0, i32 1 + %90 = load %Array*, %Array** %89, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %90, i32 1) + %91 = bitcast { %Array*, %Array* }* %86 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %91, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %92 = add i64 %82, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* 
%PQandPQQRData, i32 1) + %93 = sub i64 %43, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %94 = phi i64 [ 0, %exit__7 ], [ %104, %exiting__8 ] + %95 = icmp sle i64 %94, %93 + br i1 %95, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %96 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %94) + %97 = bitcast i8* %96 to { %Array*, %Array* }** + %98 = load { %Array*, %Array* }*, { %Array*, %Array* }** %97, align 8 + %99 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %98, i32 0, i32 0 + %100 = load %Array*, %Array** %99, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %100, i32 1) + %101 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %98, i32 0, i32 1 + %102 = load %Array*, %Array** %101, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %102, i32 1) + %103 = bitcast { %Array*, %Array* }* %98 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %103, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %104 = add i64 %94, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 1) + %105 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 0) + %107 = bitcast i8* %106 to i64* + store i64 0, i64* %107, align 4 + %ZGenSys = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %ZData, %Array* %105) + %108 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %ZGenSys, i32 0, i32 1 + %109 = load %Callable*, %Callable** %108, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %109, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %109, i32 1) + %110 = bitcast { i64, %Callable* }* %ZGenSys to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %110, i32 1) + %111 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %111, i64 0) + %113 = bitcast i8* %112 to i64* + store i64 1, i64* %113, align 4 + %ZZGenSys = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %ZZData, %Array* %111) + %114 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %ZZGenSys, i32 0, i32 1 + %115 = load %Callable*, %Callable** %114, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %115, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %115, i32 1) + %116 = bitcast { i64, %Callable* }* %ZZGenSys to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %116, i32 1) + %117 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %117, i64 0) + %119 = bitcast i8* %118 to i64* + store i64 2, i64* %119, align 4 + %PQandPQQRGenSys = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %PQandPQQRData, %Array* %117) + %120 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %PQandPQQRGenSys, i32 0, i32 1 + %121 = load %Callable*, %Callable** %120, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %121, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %121, i32 1) + %122 = bitcast { i64, %Callable* }* %PQandPQQRGenSys to %Tuple* + 
call void @__quantum__rt__tuple_update_alias_count(%Tuple* %122, i32 1) + %123 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %124 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %123, i64 0) + %125 = bitcast i8* %124 to i64* + store i64 3, i64* %125, align 4 + %h0123GenSys = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %h0123Data, %Array* %123) + %126 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %h0123GenSys, i32 0, i32 1 + %127 = load %Callable*, %Callable** %126, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %127, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %127, i32 1) + %128 = bitcast { i64, %Callable* }* %h0123GenSys to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %128, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %109, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %109, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %110, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %115, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %115, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %116, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %121, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %121, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %122, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %127, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %127, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %128, i32 1) + %129 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 4) + %130 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 0) + %131 = bitcast i8* %130 to { i64, %Callable* }** + %132 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 1) + %133 = bitcast i8* %132 to { i64, %Callable* }** + %134 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 2) + %135 = bitcast i8* %134 to { i64, %Callable* }** + %136 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 3) + %137 = bitcast i8* %136 to { i64, %Callable* }** + store { i64, %Callable* }* %ZGenSys, { i64, %Callable* }** %131, align 8 + store { i64, %Callable* }* %ZZGenSys, { i64, %Callable* }** %133, align 8 + store { i64, %Callable* }* %PQandPQQRGenSys, { i64, %Callable* }** %135, align 8 + store { i64, %Callable* }* %h0123GenSys, { i64, %Callable* }** %137, align 8 + %138 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__SumGeneratorSystems__body(%Array* %129) + %139 = sub i64 %1, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %140 = phi i64 [ 0, %exit__8 ], [ %150, %exiting__9 ] + %141 = icmp sle i64 %140, %139 + br i1 %141, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %140) + %143 = bitcast i8* %142 to { %Array*, %Array* }** + %144 = load { %Array*, %Array* }*, { %Array*, %Array* }** %143, align 8 + %145 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %144, i32 0, i32 0 + %146 = load %Array*, %Array** %145, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %146, i32 -1) + %147 = 
getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %144, i32 0, i32 1 + %148 = load %Array*, %Array** %147, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %148, i32 -1) + %149 = bitcast { %Array*, %Array* }* %144 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %149, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %150 = add i64 %140, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 -1) + %151 = sub i64 %15, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %152 = phi i64 [ 0, %exit__9 ], [ %162, %exiting__10 ] + %153 = icmp sle i64 %152, %151 + br i1 %153, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %154 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %152) + %155 = bitcast i8* %154 to { %Array*, %Array* }** + %156 = load { %Array*, %Array* }*, { %Array*, %Array* }** %155, align 8 + %157 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %156, i32 0, i32 0 + %158 = load %Array*, %Array** %157, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %158, i32 -1) + %159 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %156, i32 0, i32 1 + %160 = load %Array*, %Array** %159, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %160, i32 -1) + %161 = bitcast { %Array*, %Array* }* %156 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %161, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %162 = add i64 %152, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 -1) + %163 = sub i64 %29, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %164 = phi i64 [ 0, %exit__10 ], [ %174, %exiting__11 ] + %165 = icmp sle i64 %164, %163 + br i1 %165, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %166 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %164) + %167 = bitcast i8* %166 to { %Array*, %Array* }** + %168 = load { %Array*, %Array* }*, { %Array*, %Array* }** %167, align 8 + %169 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %168, i32 0, i32 0 + %170 = load %Array*, %Array** %169, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %170, i32 -1) + %171 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %168, i32 0, i32 1 + %172 = load %Array*, %Array** %171, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %172, i32 -1) + %173 = bitcast { %Array*, %Array* }* %168 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %173, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %174 = add i64 %164, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %PQandPQQRData, i32 -1) + %175 = sub i64 %43, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %176 = phi i64 [ 0, %exit__11 ], [ %186, %exiting__12 ] + %177 = icmp sle i64 %176, %175 + br i1 %177, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %178 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %176) + %179 = bitcast i8* %178 to { %Array*, %Array* }** + %180 = load { %Array*, %Array* }*, { %Array*, 
%Array* }** %179, align 8 + %181 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %180, i32 0, i32 0 + %182 = load %Array*, %Array** %181, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %182, i32 -1) + %183 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %180, i32 0, i32 1 + %184 = load %Array*, %Array** %183, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %184, i32 -1) + %185 = bitcast { %Array*, %Array* }* %180 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %185, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %186 = add i64 %176, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 -1) + %187 = sub i64 %1, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %188 = phi i64 [ 0, %exit__12 ], [ %198, %exiting__13 ] + %189 = icmp sle i64 %188, %187 + br i1 %189, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %190 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZData, i64 %188) + %191 = bitcast i8* %190 to { %Array*, %Array* }** + %192 = load { %Array*, %Array* }*, { %Array*, %Array* }** %191, align 8 + %193 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %192, i32 0, i32 0 + %194 = load %Array*, %Array** %193, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %194, i32 -1) + %195 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %192, i32 0, i32 1 + %196 = load %Array*, %Array** %195, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %196, i32 -1) + %197 = bitcast { %Array*, %Array* }* %192 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %197, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %198 = add i64 %188, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_alias_count(%Array* %ZData, i32 -1) + %199 = sub i64 %15, 1 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %200 = phi i64 [ 0, %exit__13 ], [ %210, %exiting__14 ] + %201 = icmp sle i64 %200, %199 + br i1 %201, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %202 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ZZData, i64 %200) + %203 = bitcast i8* %202 to { %Array*, %Array* }** + %204 = load { %Array*, %Array* }*, { %Array*, %Array* }** %203, align 8 + %205 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %204, i32 0, i32 0 + %206 = load %Array*, %Array** %205, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %206, i32 -1) + %207 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %204, i32 0, i32 1 + %208 = load %Array*, %Array** %207, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %208, i32 -1) + %209 = bitcast { %Array*, %Array* }* %204 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %209, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %210 = add i64 %200, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_alias_count(%Array* %ZZData, i32 -1) + %211 = sub i64 %29, 1 + br label %header__15 + +header__15: ; preds = %exiting__15, %exit__14 + %212 = phi i64 [ 0, %exit__14 ], [ %222, 
%exiting__15 ] + %213 = icmp sle i64 %212, %211 + br i1 %213, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %PQandPQQRData, i64 %212) + %215 = bitcast i8* %214 to { %Array*, %Array* }** + %216 = load { %Array*, %Array* }*, { %Array*, %Array* }** %215, align 8 + %217 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %216, i32 0, i32 0 + %218 = load %Array*, %Array** %217, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %218, i32 -1) + %219 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %216, i32 0, i32 1 + %220 = load %Array*, %Array** %219, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %220, i32 -1) + %221 = bitcast { %Array*, %Array* }* %216 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %221, i32 -1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %222 = add i64 %212, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_alias_count(%Array* %PQandPQQRData, i32 -1) + %223 = sub i64 %43, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %224 = phi i64 [ 0, %exit__15 ], [ %234, %exiting__16 ] + %225 = icmp sle i64 %224, %223 + br i1 %225, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %226 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %h0123Data, i64 %224) + %227 = bitcast i8* %226 to { %Array*, %Array* }** + %228 = load { %Array*, %Array* }*, { %Array*, %Array* }** %227, align 8 + %229 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %228, i32 0, i32 0 + %230 = load %Array*, %Array** %229, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %230, i32 -1) + %231 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %228, i32 0, i32 1 + %232 = load %Array*, %Array** %231, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %232, i32 -1) + %233 = bitcast { %Array*, %Array* }* %228 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %233, i32 -1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %234 = add i64 %224, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_alias_count(%Array* %h0123Data, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %109, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %109, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %110, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %115, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %115, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %116, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %121, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %121, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %122, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %127, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %127, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %128, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %105, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %109, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %109, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %110, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %111, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %115, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %115, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %116, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %117, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %121, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %121, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %122, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %123, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %127, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %127, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %128, i32 -1) + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %235 = phi i64 [ 0, %exit__16 ], [ %243, %exiting__17 ] + %236 = icmp sle i64 %235, 3 + br i1 %236, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %237 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 %235) + %238 = bitcast i8* %237 to { i64, %Callable* }** + %239 = load { i64, %Callable* }*, { i64, %Callable* }** %238, align 8 + %240 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %239, i32 0, i32 1 + %241 = load %Callable*, %Callable** %240, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %241, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %241, i32 -1) + %242 = bitcast { i64, %Callable* }* %239 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %242, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %243 = add i64 %235, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_reference_count(%Array* %129, i32 -1) + ret { i64, %Callable* }* %138 +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Chemistry__HTermsToGenSys__body(%Array* %data, %Array* %termType) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %data) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %2) + %5 = bitcast i8* %4 to { %Array*, %Array* }** + %6 = load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %2, 1 + br label 
%header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 1) + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__HTermsToGenIdx__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %14 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %25, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %15) + %18 = bitcast i8* %17 to { %Array*, %Array* }** + %19 = load { %Array*, %Array* }*, { %Array*, %Array* }** %18, align 8 + %20 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %19, i32 0, i32 0 + %21 = load %Array*, %Array** %20, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 1) + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %19, i32 0, i32 1 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 1) + %24 = bitcast { %Array*, %Array* }* %19 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %25 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %data, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %termType, i32 1) + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Array* }* getelementptr ({ %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Callable*, %Array*, %Array* }* + %28 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %27, i32 0, i32 1 + %30 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %27, i32 0, i32 2 + store %Callable* %13, %Callable** %28, align 8 + store %Array* %data, %Array** %29, align 8 + store %Array* %termType, %Array** %30, align 8 + %31 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__43__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__28__FunctionTable, %Tuple* %26) + %32 = call { i64, %Callable* }* @Microsoft__Quantum__Simulation__GeneratorSystem__body(i64 %0, %Callable* %31) + %33 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %34 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %35 = icmp sle i64 %34, %33 + br i1 %35, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %34) + %37 = bitcast i8* %36 to { %Array*, %Array* }** + %38 = load { %Array*, %Array* }*, { %Array*, %Array* }** %37, align 8 + %39 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %38, i32 0, i32 0 + %40 = load %Array*, %Array** %39, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %40, i32 -1) + %41 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %38, 
i32 0, i32 1 + %42 = load %Array*, %Array** %41, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 -1) + %43 = bitcast { %Array*, %Array* }* %38 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %44 = add i64 %34, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + ret { i64, %Callable* }* %32 +} + +define internal void @Microsoft__Quantum__Intrinsic__X__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSparseMultiConfigurationalState__body(%Callable* %initialStatePreparation, %Array* %excitations, %Array* %qubits) { +entry: + %success = alloca i1, align 1 + %applyFlips = alloca %Array*, align 8 + %coefficientsNewComplexPolar = alloca %Array*, align 8 + %coefficientsSqrtAbs = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %initialStatePreparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %initialStatePreparation, i32 1) + %nExcitations = call i64 @__quantum__rt__array_get_size_1d(%Array* %excitations) + %0 = sub i64 %nExcitations, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + 
+body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %excitations, i64 %1) + %4 = bitcast i8* %3 to { { double, double }*, %Array* }** + %5 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %4, align 8 + %6 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %5, i32 0, i32 0 + %7 = load { double, double }*, { double, double }** %6, align 8 + %8 = bitcast { double, double }* %7 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + %9 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %5, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { { double, double }*, %Array* }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %excitations, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %13 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nExcitations) + %14 = sub i64 %nExcitations, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 %15) + %18 = bitcast i8* %17 to double* + store double 0.000000e+00, double* %18, align 8 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + store %Array* %13, %Array** %coefficientsSqrtAbs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %20 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double 0.000000e+00, double 0.000000e+00) + %21 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nExcitations) + %22 = sub i64 %nExcitations, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %23 = phi i64 [ 0, %exit__2 ], [ %28, %exiting__3 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %23) + %26 = bitcast i8* %25 to { double, double }** + store { double, double }* %20, { double, double }** %26, align 8 + %27 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %28 = add i64 %23, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %21, %Array** %coefficientsNewComplexPolar, align 8 + %29 = sub i64 %nExcitations, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %30 = phi i64 [ 0, %exit__3 ], [ %36, %exiting__4 ] + %31 = icmp sle i64 %30, %29 + br i1 %31, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %30) + %33 = bitcast i8* %32 to { double, double }** + %34 = load { double, double }*, { double, double }** %33, align 8 + %35 = bitcast { double, double }* %34 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %35, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %36 = add i64 %30, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 1) + %37 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %38 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nExcitations) + %39 = sub i64 %nExcitations, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %40 = phi i64 [ 0, %exit__4 ], [ %44, %exiting__5 ] + %41 = icmp sle i64 %40, %39 + br i1 %41, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 %40) + %43 = bitcast i8* %42 to %Array** + store %Array* %37, %Array** %43, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %44 = add i64 %40, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + store %Array* %38, %Array** %applyFlips, align 8 + %45 = sub i64 %nExcitations, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %46 = phi i64 [ 0, %exit__5 ], [ %51, %exiting__6 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 %46) + %49 = bitcast i8* %48 to %Array** + %50 = load %Array*, %Array** %49, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %51 = add i64 %46, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %38, i32 1) + %52 = sub i64 %nExcitations, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %idx = phi i64 [ 0, %exit__6 ], [ %94, %exiting__7 ] + %53 = icmp sle i64 %idx, %52 + br i1 %53, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %excitations, i64 %idx) + %55 = bitcast i8* %54 to { { double, double }*, %Array* }** + %56 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %55, align 8 + %57 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %56, i32 0, i32 0 + %x = load { double, double }*, { double, double }** %57, align 8 + %58 = bitcast { double, double }* %x to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %58, i32 1) + %59 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %56, i32 0, i32 1 + %excitation = load %Array*, %Array** %59, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %excitation, i32 1) + %60 = load %Array*, %Array** %coefficientsSqrtAbs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %60, i32 -1) + %61 = call %Array* @__quantum__rt__array_copy(%Array* %60, i1 false) + %62 = getelementptr inbounds { double, double }, { double, double }* %x, i32 0, i32 0 + %63 = getelementptr inbounds { double, double }, { double, double }* %x, i32 0, i32 1 + %64 = load double, double* %62, align 8 + %65 = load double, double* %63, align 8 + %66 = call { double, double }* @Microsoft__Quantum__Math__Complex__body(double %64, double %65) + %67 = call { double, double }* @Microsoft__Quantum__Math__ComplexAsComplexPolar__body({ double, double }* %66) + %d = 
call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %67) + %68 = call double @__quantum__qis__sqrt__body(double %d) + %69 = bitcast { double, double }* %66 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %69, i32 -1) + %70 = bitcast { double, double }* %67 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %70, i32 -1) + %71 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 %idx) + %72 = bitcast i8* %71 to double* + store double %68, double* %72, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %61, i32 1) + store %Array* %61, %Array** %coefficientsSqrtAbs, align 8 + %73 = load %Array*, %Array** %coefficientsNewComplexPolar, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %73, i32 -1) + %74 = call %Array* @__quantum__rt__array_copy(%Array* %73, i1 false) + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 %idx) + %76 = bitcast i8* %75 to double* + %77 = load double, double* %76, align 8 + %78 = call { double, double }* @Microsoft__Quantum__Math__Complex__body(double %64, double %65) + %79 = call { double, double }* @Microsoft__Quantum__Math__ComplexAsComplexPolar__body({ double, double }* %78) + %80 = call double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* %79) + %81 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %77, double %80) + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 %idx) + %83 = bitcast i8* %82 to { double, double }** + %84 = bitcast { double, double }* %81 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %84, i32 1) + %85 = load { double, double }*, { double, double }** %83, align 8 + %86 = bitcast { double, double }* %85 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %86, i32 -1) + store { double, double }* %81, { double, double }** %83, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %74, i32 1) + store %Array* %74, %Array** %coefficientsNewComplexPolar, align 8 + %87 = load %Array*, %Array** %applyFlips, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %87, i32 -1) + %88 = call %Array* @__quantum__rt__array_copy(%Array* %87, i1 false) + %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 %idx) + %90 = bitcast i8* %89 to %Array** + call void @__quantum__rt__array_update_alias_count(%Array* %excitation, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %excitation, i32 1) + %91 = load %Array*, %Array** %90, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %91, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %91, i32 -1) + store %Array* %excitation, %Array** %90, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %88, i32 1) + store %Array* %88, %Array** %applyFlips, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %58, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %excitation, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %73, i32 -1) + %92 = bitcast { double, double }* %78 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %92, i32 -1) + %93 = bitcast { double, double }* %79 to %Tuple* + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %93, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %87, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %94 = add i64 %idx, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + %95 = sitofp i64 %nExcitations to double + %96 = call double @Microsoft__Quantum__Math__Lg__body(double %95) + %nBitsIndices = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %96) + br label %repeat__1 + +repeat__1: ; preds = %fixup__1, %exit__7 + store i1 false, i1* %success, align 1 + %97 = add i64 %nBitsIndices, 1 + %auxillary = call %Array* @__quantum__rt__qubit_allocate_array(i64 %97) + call void @__quantum__rt__array_update_alias_count(%Array* %auxillary, i32 1) + %flag = call %Qubit* @__quantum__rt__qubit_allocate() + %98 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___PrepareSingleConfigurationalStateSingleSiteOccupation____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %99 = load %Array*, %Array** %applyFlips, align 8 + %100 = call %Array* @Microsoft__Quantum__Arrays___ac214dcd588b470fb29f1cc67e145065_Mapped__body(%Callable* %98, %Array* %99) + %101 = call %Callable* @Microsoft__Quantum__Arrays___fc3dc354bc024fd5b7f38df86565fb27_LookupFunction__body(%Array* %100) + %multiplexer = call %Callable* @Microsoft__Quantum__Canon__MultiplexerBruteForceFromGenerator__body(i64 %nExcitations, %Callable* %101) + call void @__quantum__rt__capture_update_alias_count(%Callable* %multiplexer, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %multiplexer, i32 1) + %102 = load %Array*, %Array** %coefficientsNewComplexPolar, align 8 + %103 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %auxillary) + call void @Microsoft__Quantum__Preparation__PrepareArbitraryStateCP__body(%Array* %102, { %Array* }* %103) + %104 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %auxillary) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %105 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array* }*, %Array* }* getelementptr ({ { %Array* }*, %Array* }, { { %Array* }*, %Array* }* null, i32 1) to i64)) + %106 = bitcast %Tuple* %105 to { { %Array* }*, %Array* }* + %107 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %106, i32 0, i32 0 + %108 = getelementptr inbounds { { %Array* }*, %Array* }, { { %Array* }*, %Array* }* %106, i32 0, i32 1 + store { %Array* }* %104, { %Array* }** %107, align 8 + store %Array* %qubits, %Array** %108, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %multiplexer, %Tuple* %105, %Tuple* null) + %109 = load %Array*, %Array** %coefficientsSqrtAbs, align 8 + %110 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %auxillary) + call void @Microsoft__Quantum__Preparation__PrepareArbitraryStateD__adj(%Array* %109, { %Array* }* %110) + %111 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %112 = call %Callable* @Microsoft__Quantum__Canon___79e0da793bac4e01ba7a8549000baf29_ControlledOnInt__body(i64 0, %Callable* %111) + call void @__quantum__rt__array_update_reference_count(%Array* %auxillary, i32 1) + %113 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %114 = bitcast %Tuple* %113 to { %Array*, %Qubit* }* + %115 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %114, i32 0, i32 0 + %116 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %114, i32 0, i32 1 + store %Array* %auxillary, %Array** %115, align 8 + store %Qubit* %flag, %Qubit** %116, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %112, %Tuple* %113, %Tuple* null) + %outcome = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %flag) + %117 = call %Result* @__quantum__rt__result_get_one() + %118 = call i1 @__quantum__rt__result_equal(%Result* %outcome, %Result* %117) + store i1 %118, i1* %success, align 1 + call void @Microsoft__Quantum__Intrinsic__ResetAll__body(%Array* %auxillary) + call void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %flag) + %119 = getelementptr inbounds { %Array* }, { %Array* }* %103, i32 0, i32 0 + %120 = load %Array*, %Array** %119, align 8 + %121 = getelementptr inbounds { %Array* }, { %Array* }* %104, i32 0, i32 0 + %122 = load %Array*, %Array** %121, align 8 + %123 = getelementptr inbounds { %Array* }, { %Array* }* %110, i32 0, i32 0 + %124 = load %Array*, %Array** %123, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %multiplexer, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %multiplexer, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %98, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %98, i32 -1) + %125 = call i64 @__quantum__rt__array_get_size_1d(%Array* %100) + %126 = sub i64 %125, 1 + br label %header__8 + +until__1: ; preds = %exit__8 + br i1 %118, label %rend__1, label %fixup__1 + +fixup__1: ; preds = %until__1 + call void @Microsoft__Quantum__Intrinsic__ResetAll__body(%Array* %qubits) + br label %repeat__1 + +rend__1: ; preds = %until__1 + %127 = load %Array*, %Array** %coefficientsSqrtAbs, align 8 + %128 = load %Array*, %Array** %coefficientsNewComplexPolar, align 8 + %129 = load %Array*, %Array** %applyFlips, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %initialStatePreparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %initialStatePreparation, i32 -1) + %130 = sub i64 %nExcitations, 1 + br label %header__9 + +header__8: ; preds = %exiting__8, %repeat__1 + %131 = phi i64 [ 0, %repeat__1 ], [ %136, %exiting__8 ] + %132 = icmp sle i64 %131, %126 + br i1 %132, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %100, i64 %131) + %134 = bitcast i8* %133 to %Callable** + %135 = load %Callable*, %Callable** %134, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %135, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %135, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %136 = add i64 %131, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %100, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %101, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %101, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %multiplexer, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %multiplexer, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + %137 = bitcast { %Array* }* %103 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %137, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %122, i32 -1) + %138 = bitcast { %Array* }* %104 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %138, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %105, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %124, i32 -1) + %139 = bitcast { %Array* }* %110 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %139, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %111, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %111, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %112, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %112, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %auxillary, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %113, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %outcome, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %flag) + call void @__quantum__rt__array_update_alias_count(%Array* %auxillary, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %auxillary) + br label %until__1 + +header__9: ; preds = %exiting__9, %rend__1 + %140 = phi i64 [ 0, %rend__1 ], [ %151, %exiting__9 ] + %141 = icmp sle i64 %140, %130 + br i1 %141, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %excitations, i64 %140) + %143 = bitcast i8* %142 to { { double, double }*, %Array* }** + %144 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %143, align 8 + %145 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %144, i32 0, i32 0 + %146 = load { double, double }*, { double, double }** %145, align 8 + %147 = bitcast { double, double }* %146 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %147, i32 -1) + %148 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %144, i32 0, i32 1 + %149 = load %Array*, %Array** %148, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %149, i32 -1) + %150 = bitcast { { double, double }*, %Array* }* %144 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %150, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %151 = add i64 %140, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %excitations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %127, i32 -1) + %152 = call i64 @__quantum__rt__array_get_size_1d(%Array* %128) + %153 = sub i64 %152, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %154 = phi i64 [ 0, %exit__9 ], [ %160, %exiting__10 ] + %155 = icmp sle i64 %154, %153 + br i1 %155, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %156 = call 
i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %128, i64 %154) + %157 = bitcast i8* %156 to { double, double }** + %158 = load { double, double }*, { double, double }** %157, align 8 + %159 = bitcast { double, double }* %158 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %159, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %160 = add i64 %154, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %128, i32 -1) + %161 = call i64 @__quantum__rt__array_get_size_1d(%Array* %129) + %162 = sub i64 %161, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %163 = phi i64 [ 0, %exit__10 ], [ %168, %exiting__11 ] + %164 = icmp sle i64 %163, %162 + br i1 %164, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %165 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 %163) + %166 = bitcast i8* %165 to %Array** + %167 = load %Array*, %Array** %166, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %167, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %168 = add i64 %163, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %129, i32 -1) + %169 = bitcast { double, double }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %169, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %127, i32 -1) + %170 = sub i64 %152, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %171 = phi i64 [ 0, %exit__11 ], [ %177, %exiting__12 ] + %172 = icmp sle i64 %171, %170 + br i1 %172, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %173 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %128, i64 %171) + %174 = bitcast i8* %173 to { double, double }** + %175 = load { double, double }*, { double, double }** %174, align 8 + %176 = bitcast { double, double }* %175 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %176, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %177 = add i64 %171, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_reference_count(%Array* %128, i32 -1) + %178 = sub i64 %161, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %179 = phi i64 [ 0, %exit__12 ], [ %184, %exiting__13 ] + %180 = icmp sle i64 %179, %178 + br i1 %180, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %181 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %129, i64 %179) + %182 = bitcast i8* %181 to %Array** + %183 = load %Array*, %Array** %182, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %183, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %184 = add i64 %179, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_reference_count(%Array* %129, i32 -1) + ret void +} + +declare void @__quantum__rt__qubit_release(%Qubit*) + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___PrepareSingleConfigurationalStateSingleSiteOccupation____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array* }* + %1 = getelementptr 
inbounds { %Array* }, { %Array* }* %0, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Callable* @Microsoft__Quantum__Chemistry__JordanWigner____QsRef3___PrepareSingleConfigurationalStateSingleSiteOccupation____body(%Array* %2) + %4 = bitcast %Tuple* %result-tuple to { %Callable* }* + %5 = getelementptr inbounds { %Callable* }, { %Callable* }* %4, i32 0, i32 0 + store %Callable* %3, %Callable** %5, align 8 + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body({ i64, %Array* }* %stateData, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %stateData, i32 0, i32 1 + %terms = load %Array*, %Array** %0, align 8 + %nTerms = call i64 @__quantum__rt__array_get_size_1d(%Array* %terms) + %1 = sub i64 %nTerms, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %2) + %5 = bitcast i8* %4 to { { double, double }*, %Array* }** + %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0 + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %terms, i32 1) + %14 = bitcast { i64, %Array* }* %stateData to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %15 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %stateData, i32 0, i32 0 + %stateType = load i64, i64* %15, align 4 + %16 = sub i64 %nTerms, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %17 = phi i64 [ 0, %exit__1 ], [ %28, %exiting__2 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %17) + %20 = bitcast i8* %19 to { { double, double }*, %Array* }** + %21 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %20, align 8 + %22 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %21, i32 0, i32 0 + %23 = load { double, double }*, { double, double }** %22, align 8 + %24 = bitcast { double, double }* %23 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 1) + %25 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %21, i32 0, i32 1 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %27 = 
bitcast { { double, double }*, %Array* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %28 = add i64 %17, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %terms, i32 1) + %29 = icmp eq i64 %stateType, 2 + br i1 %29, label %then0__1, label %test1__1 + +then0__1: ; preds = %exit__2 + %30 = call i1 @Microsoft__Quantum__Arrays___d03f28613a2a406a92da3539b001d776_IsEmpty__body(%Array* %terms) + br i1 %30, label %then0__2, label %test1__2 + +then0__2: ; preds = %then0__1 + br label %continue__2 + +test1__2: ; preds = %then0__1 + %31 = icmp eq i64 %nTerms, 1 + br i1 %31, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__2 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 0) + %33 = bitcast i8* %32 to { { double, double }*, %Array* }** + %34 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %33, align 8 + %35 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %34, i32 0, i32 0 + %coefficient = load { double, double }*, { double, double }** %35, align 8 + %36 = bitcast { double, double }* %coefficient to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %36, i32 1) + %37 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %34, i32 0, i32 1 + %qubitIndices = load %Array*, %Array** %37, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 1) + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSingleConfigurationalStateSingleSiteOccupation__body(%Array* %qubitIndices, %Array* %qubits) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %36, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubitIndices, i32 -1) + br label %continue__2 + +else__1: ; preds = %test1__2 + %38 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareSparseMultiConfigurationalState__body(%Callable* %38, %Array* %terms, %Array* %qubits) + call void @__quantum__rt__capture_update_reference_count(%Callable* %38, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %38, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %then1__1, %then0__2 + br label %continue__1 + +test1__1: ; preds = %exit__2 + %39 = icmp eq i64 %stateType, 3 + br i1 %39, label %then1__2, label %continue__1 + +then1__2: ; preds = %test1__1 + %40 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %41 = sub i64 %nTerms, 1 + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %41) + %43 = bitcast i8* %42 to { { double, double }*, %Array* }** + %44 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %43, align 8 + %45 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %44, i32 0, i32 0 + %46 = load { double, double }*, { double, double }** %45, align 8 + %47 = getelementptr inbounds { { double, double }*, %Array* }, { { double, 
double }*, %Array* }* %44, i32 0, i32 1 + %48 = load %Array*, %Array** %47, align 8 + %49 = bitcast { double, double }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %49, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %48, i32 1) + %50 = bitcast { { double, double }*, %Array* }* %44 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %50, i32 1) + %51 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 0) + %53 = bitcast i8* %52 to { { double, double }*, %Array* }** + store { { double, double }*, %Array* }* %44, { { double, double }*, %Array* }** %53, align 8 + %54 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i64, %Array* }* getelementptr ({ %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* null, i32 1) to i64)) + %55 = bitcast %Tuple* %54 to { %Callable*, i64, %Array* }* + %56 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %55, i32 0, i32 0 + %57 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %55, i32 0, i32 1 + %58 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %55, i32 0, i32 2 + store %Callable* %40, %Callable** %56, align 8 + store i64 2, i64* %57, align 4 + store %Array* %51, %Array** %58, align 8 + %referenceState = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__39__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__24__FunctionTable, %Tuple* %54) + call void @__quantum__rt__capture_update_alias_count(%Callable* %referenceState, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %referenceState, i32 1) + %59 = sub i64 %nTerms, 2 + %60 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %59, 2 + %61 = call %Array* @__quantum__rt__array_slice_1d(%Array* %terms, %Range %60, i1 true) + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareUnitaryCoupledClusterState__body(%Callable* %referenceState, %Array* %61, double 1.000000e+00, %Array* %qubits) + call void @__quantum__rt__capture_update_alias_count(%Callable* %referenceState, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %referenceState, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %referenceState, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %referenceState, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %61, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then1__2, %test1__1, %continue__2 + %62 = sub i64 %nTerms, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %continue__1 + %63 = phi i64 [ 0, %continue__1 ], [ %74, %exiting__3 ] + %64 = icmp sle i64 %63, %62 + br i1 %64, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %63) + %66 = bitcast i8* %65 to { { double, double }*, %Array* }** + %67 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %66, align 8 + %68 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %67, i32 0, i32 0 + %69 = load { double, double }*, { double, double }** %68, align 8 + %70 = bitcast { double, double }* %69 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %70, i32 -1) 
+ %71 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %67, i32 0, i32 1 + %72 = load %Array*, %Array** %71, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 -1) + %73 = bitcast { { double, double }*, %Array* }* %67 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %73, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %74 = add i64 %63, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %terms, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + %75 = sub i64 %nTerms, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %76 = phi i64 [ 0, %exit__3 ], [ %87, %exiting__4 ] + %77 = icmp sle i64 %76, %75 + br i1 %77, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %terms, i64 %76) + %79 = bitcast i8* %78 to { { double, double }*, %Array* }** + %80 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %79, align 8 + %81 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %80, i32 0, i32 0 + %82 = load { double, double }*, { double, double }** %81, align 8 + %83 = bitcast { double, double }* %82 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %83, i32 -1) + %84 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %80, i32 0, i32 1 + %85 = load %Array*, %Array** %84, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %85, i32 -1) + %86 = bitcast { { double, double }*, %Array* }* %80 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %87 = add i64 %76, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %terms, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array* }* + %1 = getelementptr inbounds { %Array* }, { %Array* }* %0, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + call void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__body(%Array* %2) + ret void +} + +define internal void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array* }* + %1 = getelementptr inbounds { %Array* }, { %Array* }* %0, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + call void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__adj(%Array* %2) + ret void +} + +define internal void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, 
%Array** %2, align 8 + call void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__ctl(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Canon___27e4b6f9eeae43ca9b9a8cbb18f6e6d3_NoOp__ctladj(%Array* %3, %Array* %4) + ret void +} + +define internal void @Lifted__PartialApplication__39__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Array* }* + %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 1 + %2 = load i64, i64* %1, align 4 + %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, %Array* }* + %7 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %6, i32 0, i32 1 + store i64 %2, i64* %7, align 4 + store %Array* %4, %Array** %8, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Array* }* + %10 = getelementptr inbounds { %Array* }, { %Array* }* %9, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Array* }*, %Array* }* getelementptr ({ { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { { i64, %Array* }*, %Array* }* + %14 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %13, i32 0, i32 1 + store { i64, %Array* }* %6, { i64, %Array* }** %14, align 8 + store %Array* %11, %Array** %15, align 8 + %16 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = load { i64, %Array* }*, { i64, %Array* }** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void 
@Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body({ i64, %Array* }* %3, %Array* %4) + ret void +} + +define internal void @MemoryManagement__24__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Array* }* + %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { { double, double }*, %Array* }** + %11 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 0 + %13 = load { double, double }*, { double, double }** %12, align 8 + %14 = bitcast { double, double }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change) + %15 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 1 + %16 = load %Array*, %Array** %15, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 %count-change) + %17 = bitcast { { double, double }*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__24__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i64, %Array* }* + %1 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i64, %Array* }, { %Callable*, i64, %Array* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { { double, double }*, %Array* }** + %11 = load { { double, double }*, %Array* }*, { { 
double, double }*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 0 + %13 = load { double, double }*, { double, double }** %12, align 8 + %14 = bitcast { double, double }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change) + %15 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %11, i32 0, i32 1 + %16 = load %Array*, %Array** %15, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %16, i32 %count-change) + %17 = bitcast { { double, double }*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareUnitaryCoupledClusterState__body(%Callable* %initialStatePreparation, %Array* %clusterOperator, double %trotterStepSize, %Array* %qubits) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %initialStatePreparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %initialStatePreparation, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %clusterOperator) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %clusterOperator, i64 %2) + %5 = bitcast i8* %4 to { { double, double }*, %Array* }** + %6 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 0 + %8 = load { double, double }*, { double, double }** %7, align 8 + %9 = bitcast { double, double }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %6, i32 0, i32 1 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { { double, double }*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %clusterOperator, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %clusterOperatorGeneratorSystem = call { i64, %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerClusterOperatorGeneratorSystem__body(%Array* %clusterOperator) + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %clusterOperatorGeneratorSystem, i32 0, i32 1 + %15 = load %Callable*, %Callable** %14, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %15, i32 1) + 
%16 = bitcast { i64, %Callable* }* %clusterOperatorGeneratorSystem to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + %17 = call { %Callable* }* @Microsoft__Quantum__Chemistry__JordanWigner__JordanWignerClusterOperatorEvolutionSet__body() + %evolutionGenerator = call { { %Callable* }*, { i64, %Callable* }* }* @Microsoft__Quantum__Simulation__EvolutionGenerator__body({ %Callable* }* %17, { i64, %Callable* }* %clusterOperatorGeneratorSystem) + %18 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 0 + %19 = load { %Callable* }*, { %Callable* }** %18, align 8 + %20 = getelementptr inbounds { %Callable* }, { %Callable* }* %19, i32 0, i32 0 + %21 = load %Callable*, %Callable** %20, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %21, i32 1) + %22 = bitcast { %Callable* }* %19 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 1) + %23 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, i32 0, i32 1 + %24 = load { i64, %Callable* }*, { i64, %Callable* }** %23, align 8 + %25 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 1 + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 1) + %27 = bitcast { i64, %Callable* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 1) + %28 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + %29 = call { %Callable* }* @Microsoft__Quantum__Simulation__TrotterSimulationAlgorithm__body(double %trotterStepSize, i64 1) + %30 = getelementptr inbounds { %Callable* }, { %Callable* }* %29, i32 0, i32 0 + %simulationAlgorithm = load %Callable*, %Callable** %30, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %simulationAlgorithm, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %simulationAlgorithm, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %simulationAlgorithm, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %simulationAlgorithm, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 1) + %31 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* getelementptr ({ %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* null, i32 1) to i64)) + %32 = bitcast %Tuple* %31 to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %33 = 
getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %32, i32 0, i32 0 + %34 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %32, i32 0, i32 1 + %35 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %32, i32 0, i32 2 + store %Callable* %simulationAlgorithm, %Callable** %33, align 8 + store double 1.000000e+00, double* %34, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %evolutionGenerator, { { %Callable* }*, { i64, %Callable* }* }** %35, align 8 + %oracle = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__40__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__25__FunctionTable, %Tuple* %31) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { %Array* }* + %38 = getelementptr inbounds { %Array* }, { %Array* }* %37, i32 0, i32 0 + store %Array* %qubits, %Array** %38, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %initialStatePreparation, %Tuple* %36, %Tuple* null) + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Array* }* + %41 = getelementptr inbounds { %Array* }, { %Array* }* %40, i32 0, i32 0 + store %Array* %qubits, %Array** %41, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %oracle, %Tuple* %39, %Tuple* null) + %42 = getelementptr inbounds { %Callable* }, { %Callable* }* %17, i32 0, i32 0 + %43 = load %Callable*, %Callable** %42, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %initialStatePreparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %initialStatePreparation, i32 -1) + %44 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %45 = phi i64 [ 0, %exit__1 ], [ %56, %exiting__2 ] + %46 = icmp sle i64 %45, %44 + br i1 %46, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %clusterOperator, i64 %45) + %48 = bitcast i8* %47 to { { double, double }*, %Array* }** + %49 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %48, align 8 + %50 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %49, i32 0, i32 0 + %51 = load { double, double }*, { double, double }** %50, align 8 + %52 = bitcast { double, double }* %51 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %52, i32 -1) + %53 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %49, i32 0, i32 1 + %54 = load %Array*, %Array** %53, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %54, i32 -1) + %55 = bitcast { { double, double }*, %Array* }* %49 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %55, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = 
%body__2 + %56 = add i64 %45, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %clusterOperator, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %15, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %21, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %simulationAlgorithm, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %simulationAlgorithm, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %43, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %43, i32 -1) + %57 = bitcast { %Callable* }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %57, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %26, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %simulationAlgorithm, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %simulationAlgorithm, i32 -1) + %58 = bitcast { %Callable* }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %58, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %oracle, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %39, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__40__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* 
}*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 2 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 2 + store double %2, double* %10, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %4, { { %Callable* }*, { i64, %Callable* }* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__40__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 2 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %10 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, 
%Callable* }* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %9, i32 0, i32 2 + store double %2, double* %10, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %4, { { %Callable* }*, { i64, %Callable* }* }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__40__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %6 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 2 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %12 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 2 + store double %7, double* %12, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %9, { { %Callable* }*, { 
i64, %Callable* }* }** %13, align 8 + store %Array* %4, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* getelementptr ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__40__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %6 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 2 + %9 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* getelementptr ({ double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* + %12 = getelementptr 
inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, i32 0, i32 2 + store double %7, double* %12, align 8 + store { { %Callable* }*, { i64, %Callable* }* }* %9, { { %Callable* }*, { i64, %Callable* }* }** %13, align 8 + store %Array* %4, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* getelementptr ({ %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }, { %Array*, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }* %11, { double, { { %Callable* }*, { i64, %Callable* }* }*, %Array* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @MemoryManagement__25__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 2 + 
%4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 %count-change) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 %count-change) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__25__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }, { %Callable*, double, { { %Callable* }*, { i64, %Callable* }* }* }* %0, i32 0, i32 2 + %4 = load { { %Callable* }*, { i64, %Callable* }* }*, { { %Callable* }*, { i64, %Callable* }* }** %3, align 8 + %5 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 0 + %6 = load { %Callable* }*, { %Callable* }** %5, align 8 + %7 = getelementptr inbounds { %Callable* }, { %Callable* }* %6, i32 0, i32 0 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 %count-change) + %9 = bitcast { %Callable* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change) + %10 = getelementptr inbounds { { %Callable* }*, { i64, %Callable* }* }, { { %Callable* }*, { i64, %Callable* }* }* %4, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = 
load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 %count-change) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change) + %15 = bitcast { { %Callable* }*, { i64, %Callable* }* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____body({ i64, %Array* }* %inputState, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputState, i32 0, i32 1 + %1 = load %Array*, %Array** %0, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { { double, double }*, %Array* }** + %8 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %7, align 8 + %9 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %8, i32 0, i32 0 + %10 = load { double, double }*, { double, double }** %9, align 8 + %11 = bitcast { double, double }* %10 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %8, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { { double, double }*, %Array* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %16 = bitcast { i64, %Array* }* %inputState to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Diagnostics__AssertAllZero__body(%Array* %qubits) + call void @Microsoft__Quantum__Chemistry__JordanWigner__PrepareTrialState__body({ i64, %Array* }* %inputState, %Array* %qubits) + %17 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %18 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %18) + %21 = bitcast i8* %20 to { { double, double }*, %Array* }** + %22 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %21, align 8 + %23 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %22, i32 0, i32 0 + %24 = load { double, double }*, { double, double }** %23, align 8 + %25 = bitcast { double, double }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* 
%25, i32 -1) + %26 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %22, i32 0, i32 1 + %27 = load %Array*, %Array** %26, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1) + %28 = bitcast { { double, double }*, %Array* }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %18, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____adj({ i64, %Array* }* %inputState, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %inputState, i32 0, i32 1 + %1 = load %Array*, %Array** %0, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { { double, double }*, %Array* }** + %8 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %7, align 8 + %9 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %8, i32 0, i32 0 + %10 = load { double, double }*, { double, double }** %9, align 8 + %11 = bitcast { double, double }* %10 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %8, i32 0, i32 1 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + %14 = bitcast { { double, double }*, %Array* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %16 = bitcast { i64, %Array* }* %inputState to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @Microsoft__Quantum__Intrinsic__ResetAll__body(%Array* %qubits) + %17 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %18 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %18) + %21 = bitcast i8* %20 to { { double, double }*, %Array* }** + %22 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %21, align 8 + %23 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %22, i32 0, i32 0 + %24 = load { double, double }*, { double, double }** %23, align 8 + %25 = bitcast { double, double }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, 
i32 -1) + %26 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %22, i32 0, i32 1 + %27 = load %Array*, %Array** %26, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1) + %28 = bitcast { { double, double }*, %Array* }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %18, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner__VQE__MeasurementOperators__body(i64 %nQubits, %Array* %indices, i64 %termType) { +entry: + %op__2 = alloca %Array*, align 8 + %compactOp__1 = alloca %Array*, align 8 + %op__1 = alloca %Array*, align 8 + %compactOp = alloca %Array*, align 8 + %op = alloca %Array*, align 8 + %ops = alloca %Array*, align 8 + %nOps = alloca i64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 1) + store i64 0, i64* %nOps, align 4 + %0 = icmp eq i64 %termType, 2 + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + store i64 2, i64* %nOps, align 4 + br label %continue__1 + +test1__1: ; preds = %entry + %1 = icmp eq i64 %termType, 3 + br i1 %1, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + store i64 8, i64* %nOps, align 4 + br label %continue__1 + +else__1: ; preds = %test1__1 + store i64 1, i64* %nOps, align 4 + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + %2 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 0) + %3 = load i64, i64* %nOps, align 4 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %3) + %5 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %6 = phi i64 [ 0, %continue__1 ], [ %10, %exiting__1 ] + %7 = icmp sle i64 %6, %5 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %6) + %9 = bitcast i8* %8 to %Array** + store %Array* %2, %Array** %9, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %2, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %6, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %4, %Array** %ops, align 8 + %11 = sub i64 %3, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %17, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %12) + %15 = bitcast i8* %14 to %Array** + %16 = load %Array*, %Array** %15, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %16, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %17 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %18 = icmp eq i64 %termType, 0 + br i1 %18, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %exit__2 + %19 = icmp eq i64 %termType, 1 + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %exit__2 + %20 
= phi i1 [ %18, %exit__2 ], [ %19, %condFalse__1 ] + br i1 %20, label %then0__2, label %test1__2 + +then0__2: ; preds = %condContinue__1 + %21 = call %Array* @Microsoft__Quantum__Arrays___8023f18e08eb4c09a8a8acf673dba09b_ConstantArray__body(i64 %nQubits, i2 0) + store %Array* %21, %Array** %op, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 1) + %22 = call i64 @__quantum__rt__array_get_size_1d(%Array* %indices) + %23 = sub i64 %22, 1 + br label %header__3 + +test1__2: ; preds = %condContinue__1 + %24 = icmp eq i64 %termType, 3 + br i1 %24, label %then1__2, label %test2__1 + +then1__2: ; preds = %test1__2 + %25 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 0) + %27 = bitcast i8* %26 to i2* + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 1) + %29 = bitcast i8* %28 to i2* + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 2) + %31 = bitcast i8* %30 to i2* + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %25, i64 3) + %33 = bitcast i8* %32 to i2* + store i2 1, i2* %27, align 1 + store i2 1, i2* %29, align 1 + store i2 1, i2* %31, align 1 + store i2 1, i2* %33, align 1 + %34 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 0) + %36 = bitcast i8* %35 to i2* + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 1) + %38 = bitcast i8* %37 to i2* + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 2) + %40 = bitcast i8* %39 to i2* + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 3) + %42 = bitcast i8* %41 to i2* + store i2 -1, i2* %36, align 1 + store i2 -1, i2* %38, align 1 + store i2 -1, i2* %40, align 1 + store i2 -1, i2* %42, align 1 + %43 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 0) + %45 = bitcast i8* %44 to i2* + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 1) + %47 = bitcast i8* %46 to i2* + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 2) + %49 = bitcast i8* %48 to i2* + %50 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 3) + %51 = bitcast i8* %50 to i2* + store i2 1, i2* %45, align 1 + store i2 1, i2* %47, align 1 + store i2 -1, i2* %49, align 1 + store i2 -1, i2* %51, align 1 + %52 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 0) + %54 = bitcast i8* %53 to i2* + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 1) + %56 = bitcast i8* %55 to i2* + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 2) + %58 = bitcast i8* %57 to i2* + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 3) + %60 = bitcast i8* %59 to i2* + store i2 -1, i2* %54, align 1 + store i2 -1, i2* %56, align 1 + store i2 1, i2* %58, align 1 + store i2 1, i2* %60, align 1 + %61 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 0) + %63 = bitcast i8* %62 to i2* + %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 1) + %65 = bitcast i8* %64 to i2* + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 2) + %67 = bitcast i8* %66 to i2* + 
%68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %61, i64 3) + %69 = bitcast i8* %68 to i2* + store i2 1, i2* %63, align 1 + store i2 -1, i2* %65, align 1 + store i2 1, i2* %67, align 1 + store i2 -1, i2* %69, align 1 + %70 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %71 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %70, i64 0) + %72 = bitcast i8* %71 to i2* + %73 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %70, i64 1) + %74 = bitcast i8* %73 to i2* + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %70, i64 2) + %76 = bitcast i8* %75 to i2* + %77 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %70, i64 3) + %78 = bitcast i8* %77 to i2* + store i2 -1, i2* %72, align 1 + store i2 1, i2* %74, align 1 + store i2 -1, i2* %76, align 1 + store i2 1, i2* %78, align 1 + %79 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %79, i64 0) + %81 = bitcast i8* %80 to i2* + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %79, i64 1) + %83 = bitcast i8* %82 to i2* + %84 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %79, i64 2) + %85 = bitcast i8* %84 to i2* + %86 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %79, i64 3) + %87 = bitcast i8* %86 to i2* + store i2 -1, i2* %81, align 1 + store i2 1, i2* %83, align 1 + store i2 1, i2* %85, align 1 + store i2 -1, i2* %87, align 1 + %88 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 0) + %90 = bitcast i8* %89 to i2* + %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 1) + %92 = bitcast i8* %91 to i2* + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 2) + %94 = bitcast i8* %93 to i2* + %95 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 3) + %96 = bitcast i8* %95 to i2* + store i2 1, i2* %90, align 1 + store i2 -1, i2* %92, align 1 + store i2 -1, i2* %94, align 1 + store i2 1, i2* %96, align 1 + %compactOps = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 0) + %98 = bitcast i8* %97 to %Array** + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 1) + %100 = bitcast i8* %99 to %Array** + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 2) + %102 = bitcast i8* %101 to %Array** + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 3) + %104 = bitcast i8* %103 to %Array** + %105 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 4) + %106 = bitcast i8* %105 to %Array** + %107 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 5) + %108 = bitcast i8* %107 to %Array** + %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 6) + %110 = bitcast i8* %109 to %Array** + %111 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 7) + %112 = bitcast i8* %111 to %Array** + store %Array* %25, %Array** %98, align 8 + store %Array* %34, %Array** %100, align 8 + store %Array* %43, %Array** %102, align 8 + store %Array* %52, %Array** %104, align 8 + store %Array* %61, %Array** %106, align 8 + store %Array* %70, %Array** %108, align 8 + store %Array* %79, %Array** %110, align 8 + store %Array* %88, %Array** %112, align 8 + br label 
%header__4 + +test2__1: ; preds = %test1__2 + %113 = icmp eq i64 %termType, 2 + br i1 %113, label %then2__1, label %continue__2 + +then2__1: ; preds = %test2__1 + %114 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 0) + %116 = bitcast i8* %115 to i2* + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 1) + %118 = bitcast i8* %117 to i2* + store i2 1, i2* %116, align 1 + store i2 1, i2* %118, align 1 + %119 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %120 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %119, i64 0) + %121 = bitcast i8* %120 to i2* + %122 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %119, i64 1) + %123 = bitcast i8* %122 to i2* + store i2 -1, i2* %121, align 1 + store i2 -1, i2* %123, align 1 + %compactOps__1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %124 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps__1, i64 0) + %125 = bitcast i8* %124 to %Array** + %126 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps__1, i64 1) + %127 = bitcast i8* %126 to %Array** + store %Array* %114, %Array** %125, align 8 + store %Array* %119, %Array** %127, align 8 + br label %header__12 + +continue__2: ; preds = %exit__16, %test2__1, %exit__11, %exit__3 + %128 = load %Array*, %Array** %ops, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + %129 = call i64 @__quantum__rt__array_get_size_1d(%Array* %128) + %130 = sub i64 %129, 1 + br label %header__17 + +header__3: ; preds = %exiting__3, %then0__2 + %131 = phi i64 [ 0, %then0__2 ], [ %139, %exiting__3 ] + %132 = icmp sle i64 %131, %23 + br i1 %132, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 %131) + %134 = bitcast i8* %133 to i64* + %idx = load i64, i64* %134, align 4 + %135 = load %Array*, %Array** %op, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %135, i32 -1) + %136 = call %Array* @__quantum__rt__array_copy(%Array* %135, i1 false) + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %136, i64 %idx) + %138 = bitcast i8* %137 to i2* + store i2 -2, i2* %138, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %136, i32 1) + store %Array* %136, %Array** %op, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %135, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %139 = add i64 %131, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + %140 = call %Array* @__quantum__rt__array_copy(%Array* %4, i1 false) + %141 = load %Array*, %Array** %op, align 8 + %142 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %140, i64 0) + %143 = bitcast i8* %142 to %Array** + call void @__quantum__rt__array_update_alias_count(%Array* %141, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %141, i32 1) + %144 = load %Array*, %Array** %143, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %144, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %144, i32 -1) + store %Array* %141, %Array** %143, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %140, i32 1) + store %Array* %140, %Array** %ops, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %141, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %141, i32 -1) + br label %continue__2 + +header__4: ; preds = %exiting__4, %then1__2 + %145 = phi i64 [ 0, %then1__2 ], [ %150, %exiting__4 ] + %146 = icmp sle i64 %145, 7 + br i1 %146, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %147 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 %145) + %148 = bitcast i8* %147 to %Array** + %149 = load %Array*, %Array** %148, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %149, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %150 = add i64 %145, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %compactOps, i32 1) + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %iOp = phi i64 [ 0, %exit__4 ], [ %159, %exiting__5 ] + %151 = icmp sle i64 %iOp, 7 + br i1 %151, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 %iOp) + %153 = bitcast i8* %152 to %Array** + %154 = load %Array*, %Array** %153, align 8 + store %Array* %154, %Array** %compactOp, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %154, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %154, i32 1) + %155 = call %Array* @Microsoft__Quantum__Arrays___8023f18e08eb4c09a8a8acf673dba09b_ConstantArray__body(i64 %nQubits, i2 0) + store %Array* %155, %Array** %op__1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %155, i32 1) + %156 = call %Array* @Microsoft__Quantum__Arrays___00d59157a6454ecdaf64b45c69ab4afd_Zipped__body(%Array* %indices, %Array* %154) + %157 = call i64 @__quantum__rt__array_get_size_1d(%Array* %156) + %158 = sub i64 %157, 1 + br label %header__6 + +exiting__5: ; preds = %exit__9 + %159 = add i64 %iOp, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + br label %header__10 + +header__6: ; preds = %exiting__6, %body__5 + %160 = phi i64 [ 0, %body__5 ], [ %171, %exiting__6 ] + %161 = icmp sle i64 %160, %158 + br i1 %161, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %162 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %156, i64 %160) + %163 = bitcast i8* %162 to { i64, i2 }** + %164 = load { i64, i2 }*, { i64, i2 }** %163, align 8 + %165 = getelementptr inbounds { i64, i2 }, { i64, i2 }* %164, i32 0, i32 0 + %idx__1 = load i64, i64* %165, align 4 + %166 = getelementptr inbounds { i64, i2 }, { i64, i2 }* %164, i32 0, i32 1 + %pauli = load i2, i2* %166, align 1 + %167 = load %Array*, %Array** %op__1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %167, i32 -1) + %168 = call %Array* @__quantum__rt__array_copy(%Array* %167, i1 false) + %169 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %168, i64 %idx__1) + %170 = bitcast i8* %169 to i2* + store i2 %pauli, i2* %170, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %168, i32 1) + store %Array* %168, %Array** %op__1, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %167, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %171 = add i64 %160, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + %172 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 
0) + %173 = bitcast i8* %172 to i64* + %174 = load i64, i64* %173, align 4 + %175 = add i64 %174, 1 + %176 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 1) + %177 = bitcast i8* %176 to i64* + %178 = load i64, i64* %177, align 4 + %179 = sub i64 %178, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %i = phi i64 [ %175, %exit__6 ], [ %185, %exiting__7 ] + %180 = icmp sle i64 %i, %179 + br i1 %180, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %181 = load %Array*, %Array** %op__1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %181, i32 -1) + %182 = call %Array* @__quantum__rt__array_copy(%Array* %181, i1 false) + %183 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %182, i64 %i) + %184 = bitcast i8* %183 to i2* + store i2 -2, i2* %184, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %182, i32 1) + store %Array* %182, %Array** %op__1, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %181, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %185 = add i64 %i, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + %186 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 2) + %187 = bitcast i8* %186 to i64* + %188 = load i64, i64* %187, align 4 + %189 = add i64 %188, 1 + %190 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 3) + %191 = bitcast i8* %190 to i64* + %192 = load i64, i64* %191, align 4 + %193 = sub i64 %192, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %i__1 = phi i64 [ %189, %exit__7 ], [ %199, %exiting__8 ] + %194 = icmp sle i64 %i__1, %193 + br i1 %194, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %195 = load %Array*, %Array** %op__1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %195, i32 -1) + %196 = call %Array* @__quantum__rt__array_copy(%Array* %195, i1 false) + %197 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %196, i64 %i__1) + %198 = bitcast i8* %197 to i2* + store i2 -2, i2* %198, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %196, i32 1) + store %Array* %196, %Array** %op__1, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %195, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %199 = add i64 %i__1, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + %200 = load %Array*, %Array** %ops, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %200, i32 -1) + %201 = call %Array* @__quantum__rt__array_copy(%Array* %200, i1 false) + %202 = load %Array*, %Array** %op__1, align 8 + %203 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %201, i64 %iOp) + %204 = bitcast i8* %203 to %Array** + call void @__quantum__rt__array_update_alias_count(%Array* %202, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %202, i32 1) + %205 = load %Array*, %Array** %204, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %205, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %205, i32 -1) + store %Array* %202, %Array** %204, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %201, i32 1) + store %Array* %201, %Array** %ops, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %154, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %202, i32 -1) + %206 = sub i64 %157, 1 + br label %header__9 
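+ ; header__9 below walks the zipped (index, Pauli) array one last time, dropping the reference count of each { i64, i2 } element tuple now that the Pauli string for this iOp has been written into %ops.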
+ +header__9: ; preds = %exiting__9, %exit__8 + %207 = phi i64 [ 0, %exit__8 ], [ %213, %exiting__9 ] + %208 = icmp sle i64 %207, %206 + br i1 %208, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %209 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %156, i64 %207) + %210 = bitcast i8* %209 to { i64, i2 }** + %211 = load { i64, i2 }*, { i64, i2 }** %210, align 8 + %212 = bitcast { i64, i2 }* %211 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %212, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %213 = add i64 %207, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_reference_count(%Array* %156, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %200, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %154, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %202, i32 -1) + br label %exiting__5 + +header__10: ; preds = %exiting__10, %exit__5 + %214 = phi i64 [ 0, %exit__5 ], [ %219, %exiting__10 ] + %215 = icmp sle i64 %214, 7 + br i1 %215, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %216 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 %214) + %217 = bitcast i8* %216 to %Array** + %218 = load %Array*, %Array** %217, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %218, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %219 = add i64 %214, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %compactOps, i32 -1) + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %220 = phi i64 [ 0, %exit__10 ], [ %225, %exiting__11 ] + %221 = icmp sle i64 %220, 7 + br i1 %221, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %222 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps, i64 %220) + %223 = bitcast i8* %222 to %Array** + %224 = load %Array*, %Array** %223, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %224, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %225 = add i64 %220, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_reference_count(%Array* %compactOps, i32 -1) + br label %continue__2 + +header__12: ; preds = %exiting__12, %then2__1 + %226 = phi i64 [ 0, %then2__1 ], [ %231, %exiting__12 ] + %227 = icmp sle i64 %226, 1 + br i1 %227, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %228 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps__1, i64 %226) + %229 = bitcast i8* %228 to %Array** + %230 = load %Array*, %Array** %229, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %230, i32 1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %231 = add i64 %226, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %compactOps__1, i32 1) + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %iOp__1 = phi i64 [ 0, %exit__12 ], [ %266, %exiting__13 ] + %232 = icmp sle i64 %iOp__1, 1 + br i1 %232, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %233 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps__1, i64 %iOp__1) + %234 = bitcast i8* %233 to %Array** + %235 = load %Array*, %Array** %234, 
align 8 + store %Array* %235, %Array** %compactOp__1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %235, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %235, i32 1) + %236 = call %Array* @Microsoft__Quantum__Arrays___8023f18e08eb4c09a8a8acf673dba09b_ConstantArray__body(i64 %nQubits, i2 0) + store %Array* %236, %Array** %op__2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %236, i32 1) + %nIndices = call i64 @__quantum__rt__array_get_size_1d(%Array* %indices) + call void @__quantum__rt__array_update_alias_count(%Array* %236, i32 -1) + %237 = call %Array* @__quantum__rt__array_copy(%Array* %236, i1 false) + %238 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %235, i64 0) + %239 = bitcast i8* %238 to i2* + %240 = load i2, i2* %239, align 1 + %241 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %242 = bitcast i8* %241 to i64* + %243 = load i64, i64* %242, align 4 + %244 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %237, i64 %243) + %245 = bitcast i8* %244 to i2* + store i2 %240, i2* %245, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %237, i32 1) + store %Array* %237, %Array** %op__2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %237, i32 -1) + %246 = call %Array* @__quantum__rt__array_copy(%Array* %237, i1 false) + %247 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %235, i64 1) + %248 = bitcast i8* %247 to i2* + %249 = load i2, i2* %248, align 1 + %250 = sub i64 %nIndices, 1 + %251 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 %250) + %252 = bitcast i8* %251 to i64* + %253 = load i64, i64* %252, align 4 + %254 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %246, i64 %253) + %255 = bitcast i8* %254 to i2* + %256 = load i2, i2* %255, align 1 + store i2 %249, i2* %255, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %246, i32 1) + store %Array* %246, %Array** %op__2, align 8 + %257 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %258 = bitcast i8* %257 to i64* + %259 = load i64, i64* %258, align 4 + %260 = add i64 %259, 1 + %261 = sub i64 %nIndices, 1 + %262 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 %261) + %263 = bitcast i8* %262 to i64* + %264 = load i64, i64* %263, align 4 + %265 = sub i64 %264, 1 + br label %header__14 + +exiting__13: ; preds = %continue__3 + %266 = add i64 %iOp__1, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + br label %header__15 + +header__14: ; preds = %exiting__14, %body__13 + %i__2 = phi i64 [ %260, %body__13 ], [ %272, %exiting__14 ] + %267 = icmp sle i64 %i__2, %265 + br i1 %267, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %268 = load %Array*, %Array** %op__2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %268, i32 -1) + %269 = call %Array* @__quantum__rt__array_copy(%Array* %268, i1 false) + %270 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %269, i64 %i__2) + %271 = bitcast i8* %270 to i2* + store i2 -2, i2* %271, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %269, i32 1) + store %Array* %269, %Array** %op__2, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %268, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %272 = add i64 %i__2, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + %273 = icmp eq 
i64 %nIndices, 4 + br i1 %273, label %then0__3, label %continue__3 + +then0__3: ; preds = %exit__14 + %274 = load %Array*, %Array** %op__2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %274, i32 -1) + %275 = call %Array* @__quantum__rt__array_copy(%Array* %274, i1 false) + %276 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %277 = bitcast i8* %276 to i64* + %278 = load i64, i64* %277, align 4 + %279 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 1) + %280 = bitcast i8* %279 to i64* + %281 = load i64, i64* %280, align 4 + %282 = icmp slt i64 %278, %281 + br i1 %282, label %condTrue__1, label %condContinue__2 + +condTrue__1: ; preds = %then0__3 + %283 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 1) + %284 = bitcast i8* %283 to i64* + %285 = load i64, i64* %284, align 4 + %286 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 3) + %287 = bitcast i8* %286 to i64* + %288 = load i64, i64* %287, align 4 + %289 = icmp slt i64 %285, %288 + br label %condContinue__2 + +condContinue__2: ; preds = %condTrue__1, %then0__3 + %290 = phi i1 [ %289, %condTrue__1 ], [ %282, %then0__3 ] + %291 = select i1 %290, i2 0, i2 -2 + %292 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 1) + %293 = bitcast i8* %292 to i64* + %294 = load i64, i64* %293, align 4 + %295 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %275, i64 %294) + %296 = bitcast i8* %295 to i2* + store i2 %291, i2* %296, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %275, i32 1) + store %Array* %275, %Array** %op__2, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %274, i32 -1) + br label %continue__3 + +continue__3: ; preds = %condContinue__2, %exit__14 + %297 = load %Array*, %Array** %ops, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %297, i32 -1) + %298 = call %Array* @__quantum__rt__array_copy(%Array* %297, i1 false) + %299 = load %Array*, %Array** %op__2, align 8 + %300 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %298, i64 %iOp__1) + %301 = bitcast i8* %300 to %Array** + call void @__quantum__rt__array_update_alias_count(%Array* %299, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %299, i32 1) + %302 = load %Array*, %Array** %301, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %302, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %302, i32 -1) + store %Array* %299, %Array** %301, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %298, i32 1) + store %Array* %298, %Array** %ops, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %235, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %299, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %236, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %237, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %297, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %235, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %299, i32 -1) + br label %exiting__13 + +header__15: ; preds = %exiting__15, %exit__13 + %303 = phi i64 [ 0, %exit__13 ], [ %308, %exiting__15 ] + %304 = icmp sle i64 %303, 1 + br i1 %304, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %305 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps__1, i64 %303) + %306 = bitcast i8* %305 to %Array** + %307 = load %Array*, %Array** %306, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %307, i32 -1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %308 = add i64 %303, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_alias_count(%Array* %compactOps__1, i32 -1) + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %309 = phi i64 [ 0, %exit__15 ], [ %314, %exiting__16 ] + %310 = icmp sle i64 %309, 1 + br i1 %310, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %311 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %compactOps__1, i64 %309) + %312 = bitcast i8* %311 to %Array** + %313 = load %Array*, %Array** %312, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %313, i32 -1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %314 = add i64 %309, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_reference_count(%Array* %compactOps__1, i32 -1) + br label %continue__2 + +header__17: ; preds = %exiting__17, %continue__2 + %315 = phi i64 [ 0, %continue__2 ], [ %320, %exiting__17 ] + %316 = icmp sle i64 %315, %130 + br i1 %316, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %317 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %128, i64 %315) + %318 = bitcast i8* %317 to %Array** + %319 = load %Array*, %Array** %318, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %319, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %320 = add i64 %315, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %128, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %2, i32 -1) + ret %Array* %128 +} + +define internal %Array* @Microsoft__Quantum__Chemistry__JordanWigner__VQE__ExpandedCoefficients__body(%Array* %coeff, i64 %termType) { +entry: + %coeffs = alloca %Array*, align 8 + %nCoeffs = alloca i64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + store i64 0, i64* %nCoeffs, align 4 + %0 = icmp eq i64 %termType, 2 + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + store i64 2, i64* %nCoeffs, align 4 + br label %continue__1 + +test1__1: ; preds = %entry + %1 = icmp eq i64 %termType, 3 + br i1 %1, label %then1__1, label %else__1 + +then1__1: ; preds = %test1__1 + store i64 8, i64* %nCoeffs, align 4 + br label %continue__1 + +else__1: ; preds = %test1__1 + store i64 1, i64* %nCoeffs, align 4 + br label %continue__1 + +continue__1: ; preds = %else__1, %then1__1, %then0__1 + %2 = load i64, i64* %nCoeffs, align 4 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %2) + %4 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %5 = phi i64 [ 0, %continue__1 ], [ %9, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 %5) + %8 = bitcast i8* %7 to double* + store double 0.000000e+00, double* %8, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %3, %Array** %coeffs, align 8 + 
call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %10 = icmp eq i64 %termType, 0 + br i1 %10, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %exit__1 + %11 = icmp eq i64 %termType, 1 + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %exit__1 + %12 = phi i1 [ %10, %exit__1 ], [ %11, %condFalse__1 ] + br i1 %12, label %then0__2, label %test1__2 + +then0__2: ; preds = %condContinue__1 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + %13 = call %Array* @__quantum__rt__array_copy(%Array* %3, i1 false) + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 0) + %15 = bitcast i8* %14 to double* + %16 = load double, double* %15, align 8 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 0) + %18 = bitcast i8* %17 to double* + store double %16, double* %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %13, i32 1) + store %Array* %13, %Array** %coeffs, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + br label %continue__2 + +test1__2: ; preds = %condContinue__1 + %19 = icmp eq i64 %termType, 2 + br i1 %19, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %test1__2 + %20 = icmp eq i64 %termType, 3 + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %test1__2 + %21 = phi i1 [ %19, %test1__2 ], [ %20, %condFalse__2 ] + br i1 %21, label %then1__2, label %continue__2 + +then1__2: ; preds = %condContinue__2 + %22 = sub i64 %2, 1 + br label %header__2 + +continue__2: ; preds = %exit__2, %condContinue__2, %then0__2 + %23 = load %Array*, %Array** %coeffs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + ret %Array* %23 + +header__2: ; preds = %exiting__2, %then1__2 + %i = phi i64 [ 0, %then1__2 ], [ %33, %exiting__2 ] + %24 = icmp sle i64 %i, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = load %Array*, %Array** %coeffs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + %26 = call %Array* @__quantum__rt__array_copy(%Array* %25, i1 false) + %27 = sdiv i64 %i, 2 + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coeff, i64 %27) + %29 = bitcast i8* %28 to double* + %30 = load double, double* %29, align 8 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %26, i64 %i) + %32 = bitcast i8* %31 to double* + store double %30, double* %32, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + store %Array* %26, %Array** %coeffs, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %33 = add i64 %i, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + br label %continue__2 +} + +define internal void @Lifted__PartialApplication__41__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Array* }*, { i64, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 
= call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Array* }*, %Array* }* getelementptr ({ { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { i64, %Array* }*, %Array* }* + %8 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %7, i32 0, i32 1 + store { i64, %Array* }* %2, { i64, %Array* }** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__41__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Array* }*, { i64, %Array* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Array* }*, %Array* }* getelementptr ({ { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { i64, %Array* }*, %Array* }* + %8 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %7, i32 0, i32 1 + store { i64, %Array* }* %2, { i64, %Array* }** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = load { i64, %Array* }*, { i64, %Array* }** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____body({ i64, %Array* }* %3, %Array* %4) 
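+ ; wrapper functions such as this one let the runtime invoke a specialization through a %Callable function table: the argument tuple is unpacked and forwarded to the corresponding body implementation, and the unused capture tuple is ignored.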
+ ret void +} + +define internal void @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Array* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Array* }*, %Array* }, { { i64, %Array* }*, %Array* }* %0, i32 0, i32 1 + %3 = load { i64, %Array* }*, { i64, %Array* }** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Chemistry__JordanWigner__VQE____QsRef3___PrepareTrialState____adj({ i64, %Array* }* %3, %Array* %4) + ret void +} + +define internal void @MemoryManagement__26__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Array* }*, { i64, %Array* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %4, i32 0, i32 1 + %6 = load %Array*, %Array** %5, align 8 + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %9 = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %9) + %12 = bitcast i8* %11 to { { double, double }*, %Array* }** + %13 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %12, align 8 + %14 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %13, i32 0, i32 0 + %15 = load { double, double }*, { double, double }** %14, align 8 + %16 = bitcast { double, double }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 %count-change) + %17 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %13, i32 0, i32 1 + %18 = load %Array*, %Array** %17, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 %count-change) + %19 = bitcast { { double, double }*, %Array* }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %9, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 %count-change) + %21 = bitcast { i64, %Array* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__26__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Array* }* }* + %1 = getelementptr inbounds 
{ %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Array* }* }, { %Callable*, { i64, %Array* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Array* }*, { i64, %Array* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %4, i32 0, i32 1 + %6 = load %Array*, %Array** %5, align 8 + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %9 = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %9) + %12 = bitcast i8* %11 to { { double, double }*, %Array* }** + %13 = load { { double, double }*, %Array* }*, { { double, double }*, %Array* }** %12, align 8 + %14 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %13, i32 0, i32 0 + %15 = load { double, double }*, { double, double }** %14, align 8 + %16 = bitcast { double, double }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 %count-change) + %17 = getelementptr inbounds { { double, double }*, %Array* }, { { double, double }*, %Array* }* %13, i32 0, i32 1 + %18 = load %Array*, %Array** %17, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 %count-change) + %19 = bitcast { { double, double }*, %Array* }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %19, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %9, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 %count-change) + %21 = bitcast { i64, %Array* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal double @Microsoft__Quantum__Chemistry__JordanWigner__VQE__EstimateTermExpectation__body(%Callable* %inputStateUnitary, %Array* %ops, %Array* %coeffs, i64 %nQubits, i64 %nSamples) { +entry: + %jwTermEnergy = alloca double, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %inputStateUnitary, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inputStateUnitary, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %ops) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %coeffs, i32 1) + store double 0.000000e+00, double* %jwTermEnergy, align 8 + %8 = call %Array* @Microsoft__Quantum__Arrays___1d2b34a15cf5490eb8142fe0e14c514a_Zipped__body(%Array* %coeffs, %Array* %ops) + %9 = call i64 @__quantum__rt__array_get_size_1d(%Array* %8) + %10 = sub i64 %9, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %11 = phi i64 [ 0, %exit__1 ], [ %31, %exiting__2 ] + %12 = icmp sle i64 %11, %10 + br i1 %12, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 %11) + %14 = bitcast i8* %13 to { double, %Array* }** + %15 = load { double, %Array* }*, { double, %Array* }** %14, align 8 + %16 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %15, i32 0, i32 0 + %coeff = load double, double* %16, align 8 + %17 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %15, i32 0, i32 1 + %op = load %Array*, %Array** %17, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 1) + %18 = call double @Microsoft__Quantum__Math__AbsD__body(double %coeff) + %19 = fcmp oge double %18, 1.000000e-10 + br i1 %19, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__2 + %20 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Measure__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %op, i32 1) + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Callable*, %Array* }* + %23 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %22, i32 0, i32 1 + store %Callable* %20, %Callable** %23, align 8 + store %Array* %op, %Array** %24, align 8 + %25 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__42__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__27__FunctionTable, %Tuple* %21) + %termExpectation = call double @Microsoft__Quantum__Characterization__EstimateFrequencyA__body(%Callable* %inputStateUnitary, %Callable* %25, i64 %nQubits, i64 %nSamples) + %26 = load double, double* %jwTermEnergy, align 8 + %27 = fmul double 2.000000e+00, %termExpectation + %28 = fsub double %27, 1.000000e+00 + %29 = fmul double %28, %coeff + %30 = fadd double %26, %29 + store double %30, double* %jwTermEnergy, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %25, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %25, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__2 + call void @__quantum__rt__array_update_alias_count(%Array* %op, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %continue__1 + %31 = add i64 %11, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %32 = load double, double* %jwTermEnergy, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %inputStateUnitary, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %inputStateUnitary, i32 -1) + %33 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %34 = phi i64 [ 0, 
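+; Epilogue: the remaining loops undo the alias counts taken on entry and drop
+; the references held by the zipped (coeff, op) array before the accumulated
+; energy in %32 is returned.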
%exit__2 ], [ %39, %exiting__3 ] + %35 = icmp sle i64 %34, %33 + br i1 %35, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %ops, i64 %34) + %37 = bitcast i8* %36 to %Array** + %38 = load %Array*, %Array** %37, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %38, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %39 = add i64 %34, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %ops, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeffs, i32 -1) + %40 = sub i64 %9, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %41 = phi i64 [ 0, %exit__3 ], [ %49, %exiting__4 ] + %42 = icmp sle i64 %41, %40 + br i1 %42, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 %41) + %44 = bitcast i8* %43 to { double, %Array* }** + %45 = load { double, %Array* }*, { double, %Array* }** %44, align 8 + %46 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %45, i32 0, i32 1 + %47 = load %Array*, %Array** %46, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %47, i32 -1) + %48 = bitcast { double, %Array* }* %45 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %48, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %49 = add i64 %41, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 -1) + ret double %32 +} + +define internal void @Lifted__PartialApplication__42__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Measure__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = call %Result* 
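+; Measure__body__wrapper unpacks the two %Array* fields of the argument tuple,
+; invokes Microsoft__Quantum__Intrinsic__Measure__body, and boxes the returned
+; %Result into the caller-allocated result tuple.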
@Microsoft__Quantum__Intrinsic__Measure__body(%Array* %3, %Array* %4) + %6 = bitcast %Tuple* %result-tuple to { %Result* }* + %7 = getelementptr inbounds { %Result* }, { %Result* }* %6, i32 0, i32 0 + store %Result* %5, %Result** %7, align 8 + ret void +} + +define internal void @MemoryManagement__27__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__27__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermsToGenIdx__body(%Array* %data, %Array* %termType, i64 %idx) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %data) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %2) + %5 = bitcast i8* %4 to { %Array*, %Array* }** + %6 = load { %Array*, %Array* }*, { %Array*, %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %6, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array*, %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 1) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %idx) + %14 = bitcast i8* %13 to { %Array*, 
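+; With every (idxFermions, coeff) pair in %data pinned by the prologue loop,
+; the element at %idx is selected and handed to HTermToGenIdx to build the
+; GeneratorIndex result.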
%Array* }** + %15 = load { %Array*, %Array* }*, { %Array*, %Array* }** %14, align 8 + %16 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermToGenIdx__body({ %Array*, %Array* }* %15, %Array* %termType) + %17 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %18 = phi i64 [ 0, %exit__1 ], [ %28, %exiting__2 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %data, i64 %18) + %21 = bitcast i8* %20 to { %Array*, %Array* }** + %22 = load { %Array*, %Array* }*, { %Array*, %Array* }** %21, align 8 + %23 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %22, i32 0, i32 0 + %24 = load %Array*, %Array** %23, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %24, i32 -1) + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %22, i32 0, i32 1 + %26 = load %Array*, %Array** %25, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 -1) + %27 = bitcast { %Array*, %Array* }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %28 = add i64 %18, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %data, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %16 +} + +define internal { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermToGenIdx__body({ %Array*, %Array* }* %term, %Array* %termType) { +entry: + %0 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %term, i32 0, i32 0 + %idxFermions = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %term, i32 0, i32 1 + %coeff = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + %2 = bitcast { %Array*, %Array* }* %term to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %termType, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %coeff, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, %Array* }* + %5 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %4, i32 0, i32 1 + store %Array* %termType, %Array** %5, align 8 + store %Array* %coeff, %Array** %6, align 8 + %7 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Simulation__GeneratorIndex__body({ %Array*, %Array* }* %4, %Array* %idxFermions) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void 
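+; HTermToGenIdx packs (termType, coeff) together with idxFermions into a
+; GeneratorIndex; the alias and reference counts taken above are released
+; before the freshly built tuple is returned.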
@__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %termType, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %idxFermions, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %termType, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %coeff, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret { { %Array*, %Array* }*, %Array* }* %7 +} + +define internal void @Lifted__PartialApplication__43__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { i64 }* + %6 = getelementptr inbounds { i64 }, { i64 }* %5, i32 0, i32 0 + %7 = load i64, i64* %6, align 4 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, i64 }* getelementptr ({ %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array*, i64 }* + %10 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %9, i32 0, i32 2 + store %Array* %2, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + store i64 %7, i64* %12, align 4 + %13 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Chemistry__HTermsToGenIdx__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array*, i64 }* + %1 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Array*, i64 }, { %Array*, %Array*, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %1, align 8 + %5 = load %Array*, %Array** %2, align 8 + %6 = load i64, i64* %3, align 4 + %7 = call { { %Array*, %Array* }*, %Array* }* @Microsoft__Quantum__Chemistry__HTermsToGenIdx__body(%Array* %4, %Array* %5, i64 %6) + %8 = bitcast %Tuple* %result-tuple to { { { %Array*, %Array* }*, %Array* }* }* + %9 = getelementptr inbounds { { { %Array*, %Array* }*, %Array* }* }, { { { %Array*, %Array* }*, %Array* }* }* %8, i32 0, i32 0 + store { { %Array*, %Array* }*, %Array* }* %7, { { %Array*, %Array* }*, %Array* }** %9, align 8 + ret void +} + +define internal void @MemoryManagement__28__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, 
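+; MemoryManagement__28__{Ref,Alias}Count manage a { %Callable*, %Array*, %Array* }
+; capture (a callable plus the data and termType arrays, including each nested
+; (%Array*, %Array*) term inside data), matching the capture shape of
+; Lifted__PartialApplication__43__body__wrapper above.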
%Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { %Array*, %Array* }** + %11 = load { %Array*, %Array* }*, { %Array*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 0 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 %count-change) + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 %count-change) + %16 = bitcast { %Array*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %18 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %19 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__28__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { %Array*, %Array* }** + %11 = load { %Array*, %Array* }*, { %Array*, %Array* }** %10, align 8 + %12 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 0 + %13 = load %Array*, %Array** %12, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %13, i32 %count-change) + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %11, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 %count-change) + %16 = bitcast { %Array*, %Array* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %18 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %19 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +attributes #0 = { nofree nosync nounwind readnone speculatable willreturn } diff --git a/src/munchkin/tests/qsharp/parallel-half-moons/qir/parallel-half-moons.ll b/src/munchkin/tests/qsharp/parallel-half-moons/qir/parallel-half-moons.ll new file mode 100644 index 0000000..34fdf44 --- /dev/null +++ b/src/munchkin/tests/qsharp/parallel-half-moons/qir/parallel-half-moons.ll @@ -0,0 +1,33317 @@ + +%Tuple = type opaque +%Array = type opaque +%Range = type { i64, i64, i64 } +%Callable = type opaque +%String = type opaque +%Qubit = type opaque +%Result = type opaque + +@PartialApplication__1__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Samples__WithProductKernel__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Samples__WithProductKernel__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__1__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__1__RefCount, void (%Tuple*, i32)* @MemoryManagement__1__AliasCount] +@Microsoft__Quantum__MachineLearning__LabeledSample__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning__LabeledSample__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Intrinsic__Message__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Message__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@0 = internal constant [16 x i8] c"Ready to train.\00" +@Microsoft__Quantum__Math__TimesD__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Math__TimesD__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__MachineLearning___Features__FunctionTable = internal constant [4 x 
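+; Callable function tables for parallel-half-moons.ll: each callable carries
+; four slots (body, adj, ctl, and ctladj wrappers, with null where a
+; specialization is not defined), and lifted partial applications pair such a
+; table with a two-slot MemoryManagement table (RefCount, AliasCount) when
+; passed to __quantum__rt__callable_create.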
void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning___Features__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__MachineLearning___Label__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning___Label__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@1 = internal constant [28 x i8] c" Pre-encoding samples...\00" +@PartialApplication__2__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__MachineLearning____QsRef0__EncodeSample____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning____QsRef0__EncodeSample____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__2__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__2__RefCount, void (%Tuple*, i32)* @MemoryManagement__2__AliasCount] +@2 = internal constant [21 x i8] c" Beginning epoch \00" +@3 = internal constant [2 x i8] c".\00" +@PartialApplication__3__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__MachineLearning____QsRef0__RandomlyRescale____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning____QsRef0__RandomlyRescale____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__3__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__3__RefCount, void (%Tuple*, i32)* @MemoryManagement__3__AliasCount] +@Microsoft__Quantum__Logical__NearlyEqualD__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Logical__NearlyEqualD__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__4__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctladj__wrapper] +@Microsoft__Quantum__MachineLearning____QsRef0___1b18656b59d5446c8d4e6d6de10b907b_EstimateDerivativeWithParameterShift____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* 
@Microsoft__Quantum__MachineLearning____QsRef0___1b18656b59d5446c8d4e6d6de10b907b_EstimateDerivativeWithParameterShift____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning____QsRef0___1b18656b59d5446c8d4e6d6de10b907b_EstimateDerivativeWithParameterShift____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning____QsRef0___1b18656b59d5446c8d4e6d6de10b907b_EstimateDerivativeWithParameterShift____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning____QsRef0___1b18656b59d5446c8d4e6d6de10b907b_EstimateDerivativeWithParameterShift____ctladj__wrapper] +@MemoryManagement__4__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__4__RefCount, void (%Tuple*, i32)* @MemoryManagement__4__AliasCount] +@PartialApplication__5__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__5__ctladj__wrapper] +@Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__ctladj__wrapper] +@MemoryManagement__5__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__5__RefCount, void (%Tuple*, i32)* @MemoryManagement__5__AliasCount] +@PartialApplication__6__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__6__ctladj__wrapper] +@Microsoft__Quantum__Canon___1f5badf5e91544c8bbff3b59164a3bb0_Fst__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___1f5badf5e91544c8bbff3b59164a3bb0_Fst__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Canon___e2a028c390684ab28246f52a0c3fbae9_Snd__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___e2a028c390684ab28246f52a0c3fbae9_Snd__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__7__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__7__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] 
+@Microsoft__Quantum__Arrays___8a3dda3255e547b68a0799da4c61f944_Subarray__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Arrays___8a3dda3255e547b68a0799da4c61f944_Subarray__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__6__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__6__RefCount, void (%Tuple*, i32)* @MemoryManagement__6__AliasCount] +@4 = internal constant [29 x i8] c" Beginning minibatch \00" +@5 = internal constant [5 x i8] c" of \00" +@6 = internal constant [82 x i8] c" Observed good parameter update... estimating and possibly committing.\00" +@7 = internal constant [37 x i8] c" Estimating gradient at sample \00" +@8 = internal constant [4 x i8] c"...\00" +@Microsoft__Quantum__Math__PlusD__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Math__PlusD__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__8__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__8__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Intrinsic__Measure__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Measure__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__7__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__7__RefCount, void (%Tuple*, i32)* @MemoryManagement__7__AliasCount] +@9 = internal constant [31 x i8] c"Cannot set the phase at index \00" +@10 = internal constant [8 x i8] c", only \00" +@11 = internal constant [29 x i8] c" coefficients were provided.\00" +@PartialApplication__9__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__9__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__MachineLearning____QsRef0__MisclassificationRate____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning____QsRef0__MisclassificationRate____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Canon___72deeddd84a741deba305c641ccbb494_Fst__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___72deeddd84a741deba305c641ccbb494_Fst__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Canon___ce11ecc402da481dad234c6ec2301ce8_Snd__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* 
@Microsoft__Quantum__Canon___ce11ecc402da481dad234c6ec2301ce8_Snd__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__8__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__8__RefCount, void (%Tuple*, i32)* @MemoryManagement__8__AliasCount] +@PartialApplication__10__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__10__ctladj__wrapper] +@Microsoft__Quantum__MachineLearning____QsRef0__ApplyTwoQubitCase____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning____QsRef0__ApplyTwoQubitCase____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning____QsRef0__ApplyTwoQubitCase____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning____QsRef0__ApplyTwoQubitCase____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning____QsRef0__ApplyTwoQubitCase____ctladj__wrapper] +@MemoryManagement__9__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__9__RefCount, void (%Tuple*, i32)* @MemoryManagement__9__AliasCount] +@PartialApplication__11__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__11__ctladj__wrapper] +@Microsoft__Quantum__MachineLearning____QsRef0__ReflectAboutNegativeCoefficients____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning____QsRef0__ReflectAboutNegativeCoefficients____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning____QsRef0__ReflectAboutNegativeCoefficients____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning____QsRef0__ReflectAboutNegativeCoefficients____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning____QsRef0__ReflectAboutNegativeCoefficients____ctladj__wrapper] +@MemoryManagement__10__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__10__RefCount, void (%Tuple*, i32)* @MemoryManagement__10__AliasCount] +@Microsoft__Quantum__Canon___9e001bb7f66049e0a843d161f2c1deac_Ignore__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___9e001bb7f66049e0a843d161f2c1deac_Ignore__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__12__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__12__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* 
null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__MachineLearning__EstimateClassificationProbability__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning__EstimateClassificationProbability__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__11__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__11__RefCount, void (%Tuple*, i32)* @MemoryManagement__11__AliasCount] +@PartialApplication__13__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__13__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__13__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__MachineLearning____QsRef0__PrepareClassification____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning____QsRef0__PrepareClassification____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning____QsRef0__PrepareClassification____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__12__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__12__RefCount, void (%Tuple*, i32)* @MemoryManagement__12__AliasCount] +@PartialApplication__14__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__14__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__MachineLearning__InferredLabel__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__MachineLearning__InferredLabel__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__13__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__13__RefCount, void (%Tuple*, i32)* @MemoryManagement__13__AliasCount] +@Microsoft__Quantum__Logical__NotEqualI__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Logical__NotEqualI__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Math__MaxI__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Math__MaxI__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Intrinsic__S__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__ctl__wrapper, void (%Tuple*, 
%Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__S__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__I__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__I__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__I__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__I__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__I__ctladj__wrapper] +@PartialApplication__15__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__15__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Intrinsic__Reset__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Reset__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__16__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__16__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__16__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Characterization____QsRef1__ApplyHadamardTestOnSingleRegister____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Characterization____QsRef1__ApplyHadamardTestOnSingleRegister____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Characterization____QsRef1__ApplyHadamardTestOnSingleRegister____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@MemoryManagement__14__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__14__RefCount, void (%Tuple*, i32)* @MemoryManagement__14__AliasCount] +@12 = internal constant [75 x i8] c"operation ApplyDiagonalUnitary -- Number of qubits must be greater than 0.\00" +@PartialApplication__17__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__17__ctladj__wrapper] +@Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj__wrapper] +@MemoryManagement__15__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__15__RefCount, void (%Tuple*, i32)* @MemoryManagement__15__AliasCount] +@PartialApplication__18__FunctionTable = internal constant [4 x void (%Tuple*, 
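+; String globals @13 through @17 below hold the MultiplexPauli failure message
+; and the Pauli operator names used to format it at runtime.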
%Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__18__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__18__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__18__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__18__ctladj__wrapper] +@Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj__wrapper] +@MemoryManagement__16__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__16__RefCount, void (%Tuple*, i32)* @MemoryManagement__16__AliasCount] +@Microsoft__Quantum__Intrinsic__H__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__ctladj__wrapper] +@PartialApplication__19__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__19__ctladj__wrapper] +@13 = internal constant [38 x i8] c"MultiplexPauli failed. 
Invalid pauli \00" +@14 = internal constant [7 x i8] c"PauliX\00" +@15 = internal constant [7 x i8] c"PauliY\00" +@16 = internal constant [7 x i8] c"PauliZ\00" +@17 = internal constant [7 x i8] c"PauliI\00" +@PartialApplication__20__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__20__ctladj__wrapper] +@PartialApplication__21__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__21__ctladj__wrapper] +@PartialApplication__22__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__22__ctladj__wrapper] +@PartialApplication__23__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__23__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__23__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__23__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__23__ctladj__wrapper] +@PartialApplication__24__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__24__ctladj__wrapper] +@PartialApplication__25__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__25__ctladj__wrapper] +@PartialApplication__26__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__26__ctladj__wrapper] +@PartialApplication__27__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__27__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__27__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__27__ctl__wrapper, 
void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__27__ctladj__wrapper] +@PartialApplication__28__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__28__ctladj__wrapper] +@PartialApplication__29__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__29__ctladj__wrapper] +@Microsoft__Quantum__Canon___64e06f1afcfe4ab9be6fe951b3a50440___QsRef1__ApplyBoundCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___64e06f1afcfe4ab9be6fe951b3a50440___QsRef1__ApplyBoundCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___64e06f1afcfe4ab9be6fe951b3a50440___QsRef1__ApplyBoundCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___64e06f1afcfe4ab9be6fe951b3a50440___QsRef1__ApplyBoundCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___64e06f1afcfe4ab9be6fe951b3a50440___QsRef1__ApplyBoundCA____ctladj__wrapper] +@MemoryManagement__17__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__17__RefCount, void (%Tuple*, i32)* @MemoryManagement__17__AliasCount] +@PartialApplication__30__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__30__ctladj__wrapper] +@Microsoft__Quantum__Canon___71f14e6f07364d01825c873fe6a150ab___QsRef1__ApplyBoundCA____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___71f14e6f07364d01825c873fe6a150ab___QsRef1__ApplyBoundCA____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___71f14e6f07364d01825c873fe6a150ab___QsRef1__ApplyBoundCA____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___71f14e6f07364d01825c873fe6a150ab___QsRef1__ApplyBoundCA____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___71f14e6f07364d01825c873fe6a150ab___QsRef1__ApplyBoundCA____ctladj__wrapper] +@MemoryManagement__18__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__18__RefCount, void (%Tuple*, i32)* @MemoryManagement__18__AliasCount] +@PartialApplication__31__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__31__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__31__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__31__ctl__wrapper, void (%Tuple*, 
%Tuple*, %Tuple*)* @Lifted__PartialApplication__31__ctladj__wrapper] +@Microsoft__Quantum__Canon___8858b7f5bb16452095bbf2ddd9326e17_ApplyIfCA__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___8858b7f5bb16452095bbf2ddd9326e17_ApplyIfCA__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___8858b7f5bb16452095bbf2ddd9326e17_ApplyIfCA__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___8858b7f5bb16452095bbf2ddd9326e17_ApplyIfCA__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___8858b7f5bb16452095bbf2ddd9326e17_ApplyIfCA__ctladj__wrapper] +@MemoryManagement__19__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__19__RefCount, void (%Tuple*, i32)* @MemoryManagement__19__AliasCount] +@18 = internal constant [39 x i8] c"Array must be of the length at least 1\00" +@19 = internal constant [71 x i8] c"Specified output array length must be longer than `inputArray` length.\00" +@Microsoft__Quantum__Arrays___81b2e45870f04b54ac181661cda83d5d___QsRef1__Identity____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Arrays___81b2e45870f04b54ac181661cda83d5d___QsRef1__Identity____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Arrays___6ed5375d64984881b234f01e25bc55b9___QsRef1__Identity____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Arrays___6ed5375d64984881b234f01e25bc55b9___QsRef1__Identity____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Arrays___b8c470817e3c4d54a387b72f70fe0572___QsRef1__Identity____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Arrays___b8c470817e3c4d54a387b72f70fe0572___QsRef1__Identity____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Arrays___9e4eb8c66a5d41c0ab661fccd1f15c41___QsRef1__Identity____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Arrays___9e4eb8c66a5d41c0ab661fccd1f15c41___QsRef1__Identity____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Arrays___d0d4b543e4084f10a022319d0e6d7887___QsRef1__Identity____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Arrays___d0d4b543e4084f10a022319d0e6d7887___QsRef1__Identity____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Logical__And__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Logical__And__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@20 = internal constant [27 x i8] c"nElements must be positive\00" 
+@Microsoft__Quantum__Canon___facc0657b0284c16ae2c0d999b143be0_Fst__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___facc0657b0284c16ae2c0d999b143be0_Fst__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Canon___bfd0dc2872b54301bd24b64a2c23e89e_Snd__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon___bfd0dc2872b54301bd24b64a2c23e89e_Snd__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@21 = internal constant [2 x i8] c"\22\00" +@22 = internal constant [13 x i8] c"\0A\09Expected:\09\00" +@23 = internal constant [5 x i8] c"true\00" +@24 = internal constant [6 x i8] c"false\00" +@25 = internal constant [11 x i8] c"\0A\09Actual:\09\00" +@26 = internal constant [33 x i8] c"`bits` must be between 0 and 63 \00" +@27 = internal constant [34 x i8] c"`number` must be between 0 and 2^\00" +@28 = internal constant [15 x i8] c" - 1, but was \00" +@Microsoft__Quantum__Intrinsic__X__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper] +@Microsoft__Quantum__Logical__Not__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Logical__Not__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@PartialApplication__32__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__32__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__32__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__32__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__32__ctladj__wrapper] +@Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____ctladj__wrapper] +@MemoryManagement__20__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__20__RefCount, void (%Tuple*, i32)* @MemoryManagement__20__AliasCount] +@PartialApplication__33__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__33__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__33__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__33__ctl__wrapper, void 
(%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__33__ctladj__wrapper] +@PartialApplication__34__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__34__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__34__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__34__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__34__ctladj__wrapper] +@Microsoft__Quantum__Preparation____QsRef1__ApplyGlobalRotationStep____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef1__ApplyGlobalRotationStep____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef1__ApplyGlobalRotationStep____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef1__ApplyGlobalRotationStep____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef1__ApplyGlobalRotationStep____ctladj__wrapper] +@MemoryManagement__21__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__21__RefCount, void (%Tuple*, i32)* @MemoryManagement__21__AliasCount] +@PartialApplication__35__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__35__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__35__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__35__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__35__ctladj__wrapper] +@Microsoft__Quantum__Preparation____QsRef1__ApplyToLittleEndian____FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef1__ApplyToLittleEndian____body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef1__ApplyToLittleEndian____adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef1__ApplyToLittleEndian____ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Preparation____QsRef1__ApplyToLittleEndian____ctladj__wrapper] +@MemoryManagement__22__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__22__RefCount, void (%Tuple*, i32)* @MemoryManagement__22__AliasCount] + +define %Array* @Microsoft__Quantum__Samples__ClassifierStructure__body() { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { i64, %Array* }* + %3 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %2, i32 0, i32 1 + store i64 0, i64* %3, align 4 + store %Array* %0, %Array** %4, align 8 + %5 = call { { i64, %Array* }*, i2, i64 }* @Microsoft__Quantum__MachineLearning__ControlledRotation__body({ i64, %Array* }* %2, i2 1, i64 4) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %1, i32 -1) + %6 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %7 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { i64, %Array* }* + %9 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %8, i32 0, i32 1 + store i64 0, i64* %9, align 4 + store %Array* %6, %Array** %10, align 8 + %11 = call { { i64, %Array* }*, i2, i64 }* @Microsoft__Quantum__MachineLearning__ControlledRotation__body({ i64, %Array* }* %8, i2 -2, i64 5) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + %12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { i64, %Array* }* + %15 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %14, i32 0, i32 0 + %16 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %14, i32 0, i32 1 + store i64 1, i64* %15, align 4 + store %Array* %12, %Array** %16, align 8 + %17 = call { { i64, %Array* }*, i2, i64 }* @Microsoft__Quantum__MachineLearning__ControlledRotation__body({ i64, %Array* }* %14, i2 1, i64 6) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + %18 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { i64, %Array* }* + %21 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %20, i32 0, i32 1 + store i64 1, i64* %21, align 4 + store %Array* %18, %Array** %22, align 8 + %23 = call { { i64, %Array* }*, i2, i64 }* @Microsoft__Quantum__MachineLearning__ControlledRotation__body({ i64, %Array* }* %20, i2 -2, i64 7) + call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + %24 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %24, i64 0) + %26 = bitcast i8* %25 to i64* + store i64 1, i64* %26, align 4 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { i64, %Array* }* + %29 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %28, i32 0, i32 1 + store i64 0, i64* %29, align 4 + store %Array* %24, %Array** %30, align 8 + %31 = call { { i64, %Array* }*, i2, i64 }* @Microsoft__Quantum__MachineLearning__ControlledRotation__body({ i64, %Array* }* %28, i2 1, i64 0) + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + %32 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 0) + %34 = bitcast i8* %33 to i64* + store i64 0, i64* %34, align 4 + %35 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %36 = bitcast %Tuple* %35 to { i64, %Array* }* + %37 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %36, i32 0, i32 0 + %38 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %36, i32 0, i32 1 + store i64 1, i64* %37, align 4 + store %Array* %32, %Array** %38, align 8 + %39 = call { { i64, %Array* }*, i2, i64 }* @Microsoft__Quantum__MachineLearning__ControlledRotation__body({ i64, %Array* }* %36, i2 1, i64 1) + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + %40 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %41 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %42 = bitcast %Tuple* %41 to { i64, %Array* }* + %43 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %42, i32 0, i32 0 + %44 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %42, i32 0, i32 1 + store i64 1, i64* %43, align 4 + store %Array* %40, %Array** %44, align 8 + %45 = call { { i64, %Array* }*, i2, i64 }* @Microsoft__Quantum__MachineLearning__ControlledRotation__body({ i64, %Array* }* %42, i2 -2, i64 2) + call void @__quantum__rt__array_update_reference_count(%Array* %40, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 -1) + %46 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { i64, %Array* }* + %49 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %48, i32 0, i32 1 + store i64 1, i64* %49, align 4 + store %Array* %46, %Array** %50, align 8 + %51 = call { { i64, %Array* }*, i2, i64 }* @Microsoft__Quantum__MachineLearning__ControlledRotation__body({ i64, %Array* }* %48, i2 1, i64 3) + call void @__quantum__rt__array_update_reference_count(%Array* %46, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + %52 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 8) + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 0) + %54 = bitcast i8* %53 to { { i64, %Array* }*, i2, i64 }** + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 1) + %56 = bitcast i8* %55 to { { i64, %Array* }*, i2, i64 }** + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 2) + %58 = bitcast i8* %57 to { { i64, %Array* }*, i2, i64 }** + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 3) + %60 = bitcast i8* %59 to { { i64, %Array* }*, i2, i64 }** + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 4) + %62 = bitcast i8* %61 to { { i64, %Array* }*, i2, i64 }** + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 5) + %64 = bitcast i8* %63 to { { i64, %Array* }*, i2, i64 }** + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 6) + %66 = bitcast i8* %65 to { { i64, %Array* }*, i2, i64 }** + %67 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 7) + %68 = bitcast i8* %67 to { { i64, %Array* }*, i2, i64 }** + store { { i64, 
%Array* }*, i2, i64 }* %5, { { i64, %Array* }*, i2, i64 }** %54, align 8 + store { { i64, %Array* }*, i2, i64 }* %11, { { i64, %Array* }*, i2, i64 }** %56, align 8 + store { { i64, %Array* }*, i2, i64 }* %17, { { i64, %Array* }*, i2, i64 }** %58, align 8 + store { { i64, %Array* }*, i2, i64 }* %23, { { i64, %Array* }*, i2, i64 }** %60, align 8 + store { { i64, %Array* }*, i2, i64 }* %31, { { i64, %Array* }*, i2, i64 }** %62, align 8 + store { { i64, %Array* }*, i2, i64 }* %39, { { i64, %Array* }*, i2, i64 }** %64, align 8 + store { { i64, %Array* }*, i2, i64 }* %45, { { i64, %Array* }*, i2, i64 }** %66, align 8 + store { { i64, %Array* }*, i2, i64 }* %51, { { i64, %Array* }*, i2, i64 }** %68, align 8 + ret %Array* %52 +} + +define internal { { i64, %Array* }*, i2, i64 }* @Microsoft__Quantum__MachineLearning__ControlledRotation__body({ i64, %Array* }* %0, i2 %Axis, i64 %ParameterIndex) { +entry: + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Array* }*, i2, i64 }* getelementptr ({ { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { { i64, %Array* }*, i2, i64 }* + %3 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %2, i32 0, i32 2 + store { i64, %Array* }* %0, { i64, %Array* }** %3, align 8 + store i2 %Axis, i2* %4, align 1 + store i64 %ParameterIndex, i64* %5, align 4 + %6 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %0, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %7, i32 1) + %8 = bitcast { i64, %Array* }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 1) + ret { { i64, %Array* }*, i2, i64 }* %2 +} + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare %Tuple* @__quantum__rt__tuple_create(i64) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) + +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +define { %Array* }* @Microsoft__Quantum__Samples__DefaultSchedule__body(%Array* %samples) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %samples) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 1) + %8 = sub i64 %0, 1 + %9 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %8, 2 + %10 = call %Array* @__quantum__rt__array_create_1d(i32 ptrtoint (%Range* getelementptr (%Range, %Range* null, i32 1) to i32), i64 1) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 0) + %12 = bitcast i8* %11 to %Range* + store %Range %9, %Range* 
%12, align 4 + %13 = call { %Array* }* @Microsoft__Quantum__MachineLearning__SamplingSchedule__body(%Array* %10) + %14 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %20, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %15) + %18 = bitcast i8* %17 to %Array** + %19 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %20 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 -1) + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %21 = phi i64 [ 0, %exit__2 ], [ %26, %exiting__3 ] + %22 = icmp sle i64 %21, 0 + br i1 %22, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %21) + %24 = bitcast i8* %23 to %Range* + %25 = load %Range, %Range* %24, align 4 + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %26 = add i64 %21, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 -1) + ret { %Array* }* %13 +} + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) + +define internal { %Array* }* @Microsoft__Quantum__MachineLearning__SamplingSchedule__body(%Array* %__Item1__) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__Item1__) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__Item1__, i64 %2) + %5 = bitcast i8* %4 to %Range* + %6 = load %Range, %Range* %5, align 4 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__Item1__, i32 1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array* }* + %10 = getelementptr inbounds { %Array* }, { %Array* }* %9, i32 0, i32 0 + store %Array* %__Item1__, %Array** %10, align 8 + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__Item1__) + %12 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__Item1__, i64 %13) + %16 = bitcast i8* %15 to %Range* + %17 = load %Range, %Range* %16, align 4 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %18 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %__Item1__, i32 1) + %19 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %20 = phi i64 [ 0, %exit__2 ], [ %25, %exiting__3 ] + %21 = icmp sle i64 %20, 
%19 + br i1 %21, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__Item1__, i64 %20) + %23 = bitcast i8* %22 to %Range* + %24 = load %Range, %Range* %23, align 4 + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %25 = add i64 %20, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %__Item1__, i32 -1) + ret { %Array* }* %9 +} + +define %Array* @Microsoft__Quantum__Samples__Preprocessed__body(%Array* %samples) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %samples) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 1) + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Samples__WithProductKernel__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Callable*, double }* + %11 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %10, i32 0, i32 1 + store %Callable* %8, %Callable** %11, align 8 + store double 1.000000e+00, double* %12, align 8 + %13 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %9) + %14 = call %Array* @Microsoft__Quantum__Arrays___97a84e9393e54d9d8a4592ac93dfb228_Mapped__body(%Callable* %13, %Array* %samples) + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %16) + %19 = bitcast i8* %18 to %Array** + %20 = load %Array*, %Array** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + ret %Array* %14 +} + +define internal %Array* @Microsoft__Quantum__Arrays___97a84e9393e54d9d8a4592ac93dfb228_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, 
i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to %Array** + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %7 = icmp eq i64 %length, 0 + br i1 %7, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %9 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %11 = bitcast i8* %10 to %Array** + %12 = load %Array*, %Array** %11, align 8 + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Array* }* + %15 = getelementptr inbounds { %Array* }, { %Array* }* %14, i32 0, i32 0 + store %Array* %12, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %13, %Tuple* %16) + %17 = bitcast %Tuple* %16 to { %Array* }* + %18 = getelementptr inbounds { %Array* }, { %Array* }* %17, i32 0, i32 0 + %first = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %first, i32 1) + %19 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %20 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %21 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__2 ] + %22 = icmp sle i64 %21, %9 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %21) + %24 = bitcast i8* %23 to %Array** + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %8 + +header__3: ; preds = %exiting__3, %continue__1 + %27 = phi i64 [ 0, %continue__1 ], [ %31, %exiting__3 ] + %28 = icmp sle i64 %27, %20 + br i1 %28, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 %27) + %30 = bitcast i8* %29 to %Array** + store %Array* %first, %Array** %30, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %first, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %31 = add i64 %27, 1 + br label 
%header__3 + +exit__3: ; preds = %header__3 + store %Array* %19, %Array** %retval, align 8 + %32 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %33 = phi i64 [ 0, %exit__3 ], [ %38, %exiting__4 ] + %34 = icmp sle i64 %33, %32 + br i1 %34, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 %33) + %36 = bitcast i8* %35 to %Array** + %37 = load %Array*, %Array** %36, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %37, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %38 = add i64 %33, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 1) + %39 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idx = phi i64 [ 1, %exit__4 ], [ %56, %exiting__5 ] + %40 = icmp sle i64 %idx, %39 + br i1 %40, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %41 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 -1) + %42 = call %Array* @__quantum__rt__array_copy(%Array* %41, i1 false) + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %44 = bitcast i8* %43 to %Array** + %45 = load %Array*, %Array** %44, align 8 + %46 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %47 = bitcast %Tuple* %46 to { %Array* }* + %48 = getelementptr inbounds { %Array* }, { %Array* }* %47, i32 0, i32 0 + store %Array* %45, %Array** %48, align 8 + %49 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %46, %Tuple* %49) + %50 = bitcast %Tuple* %49 to { %Array* }* + %51 = getelementptr inbounds { %Array* }, { %Array* }* %50, i32 0, i32 0 + %52 = load %Array*, %Array** %51, align 8 + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %42, i64 %idx) + %54 = bitcast i8* %53 to %Array** + call void @__quantum__rt__array_update_alias_count(%Array* %52, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %52, i32 1) + %55 = load %Array*, %Array** %54, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %55, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %55, i32 -1) + store %Array* %52, %Array** %54, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 1) + store %Array* %42, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %46, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %52, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %49, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %56 = add i64 %idx, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %57 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %58 = sub i64 %length, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %59 = phi i64 [ 0, %exit__5 ], [ %64, %exiting__6 ] + %60 = icmp sle 
i64 %59, %58 + br i1 %60, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %59) + %62 = bitcast i8* %61 to %Array** + %63 = load %Array*, %Array** %62, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %63, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %64 = add i64 %59, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %first, i32 -1) + %65 = call i64 @__quantum__rt__array_get_size_1d(%Array* %57) + %66 = sub i64 %65, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %67 = phi i64 [ 0, %exit__6 ], [ %72, %exiting__7 ] + %68 = icmp sle i64 %67, %66 + br i1 %68, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %57, i64 %67) + %70 = bitcast i8* %69 to %Array** + %71 = load %Array*, %Array** %70, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %71, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %72 = add i64 %67, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %57, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %first, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret %Array* %57 +} + +define internal void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array* }* getelementptr ({ double, %Array* }, { double, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Array* }* + %8 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Samples__WithProductKernel__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array* }* + %1 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = call %Array* @Microsoft__Quantum__Samples__WithProductKernel__body(double %3, %Array* %4) + %6 = bitcast %Tuple* 
%result-tuple to { %Array* }* + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + store %Array* %5, %Array** %7, align 8 + ret void +} + +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) + +define internal void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) + +define %Array* @Microsoft__Quantum__Samples__WithProductKernel__body(double %scale, %Array* %sample) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %sample, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__TimesD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call double @Microsoft__Quantum__Arrays___64d768d0751b4ad8b5cf130c7bf24274_Fold__body(%Callable* %0, double 1.000000e+00, %Array* %sample) + %2 = fmul double %scale, %1 + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to double* + store double %2, double* %5, align 8 + %6 = call %Array* @__quantum__rt__array_concatenate(%Array* %sample, %Array* %3) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %sample, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + ret %Array* %6 +} + +declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) + +declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) + +define { %Array*, double, i64 }* @Microsoft__Quantum__Samples__TrainHalfMoonModelAtStartPoint__body(%Array* %trainingVectors, %Array* %trainingLabels, %Array* %startPoint) { +entry: + %0 = call i64 
@__quantum__rt__array_get_size_1d(%Array* %trainingVectors) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %trainingVectors, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %trainingVectors, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %trainingLabels, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %startPoint, i32 1) + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning__LabeledSample__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %9 = call %Array* @Microsoft__Quantum__Samples__Preprocessed__body(%Array* %trainingVectors) + %10 = call %Array* @Microsoft__Quantum__Arrays___23e2330a73974b3abb47f8506d246967_Zipped__body(%Array* %9, %Array* %trainingLabels) + %samples = call %Array* @Microsoft__Quantum__Arrays___c90f4b37e41846f5a59f0fb238007c41_Mapped__body(%Callable* %8, %Array* %10) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %samples) + %12 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %13) + %16 = bitcast i8* %15 to { %Array*, i64 }** + %17 = load { %Array*, i64 }*, { %Array*, i64 }** %16, align 8 + %18 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %17, i32 0, i32 0 + %19 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 1) + %20 = bitcast { %Array*, i64 }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 1) + %22 = call { double, double, i64, i64, i64, i64, double, i64, %Callable* }* @Microsoft__Quantum__MachineLearning__DefaultTrainingOptions__body() + %23 = bitcast { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %22 to %Tuple* + %24 = call %Tuple* @__quantum__rt__tuple_copy(%Tuple* %23, i1 false) + %25 = bitcast %Tuple* %24 to { double, double, i64, i64, i64, i64, double, i64, %Callable* }* + %26 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %25, i32 0, i32 0 + store double 1.000000e-01, double* %26, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + %27 = call %Tuple* @__quantum__rt__tuple_copy(%Tuple* %24, i1 false) + %28 = bitcast %Tuple* %27 to { double, double, i64, i64, i64, i64, double, i64, %Callable* }* + %29 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, 
i64, i64, i64, double, i64, %Callable* }* %28, i32 0, i32 2 + store i64 15, i64* %29, align 4 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + %30 = call %Tuple* @__quantum__rt__tuple_copy(%Tuple* %27, i1 false) + %31 = bitcast %Tuple* %30 to { double, double, i64, i64, i64, i64, double, i64, %Callable* }* + %32 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %31, i32 0, i32 1 + store double 5.000000e-03, double* %32, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + %33 = call %Tuple* @__quantum__rt__tuple_copy(%Tuple* %30, i1 false) + %34 = bitcast %Tuple* %33 to { double, double, i64, i64, i64, i64, double, i64, %Callable* }* + %35 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %34, i32 0, i32 3 + store i64 10000, i64* %35, align 4 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + %36 = call %Tuple* @__quantum__rt__tuple_copy(%Tuple* %33, i1 false) + %37 = bitcast %Tuple* %36 to { double, double, i64, i64, i64, i64, double, i64, %Callable* }* + %38 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %37, i32 0, i32 4 + store i64 2, i64* %38, align 4 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 -1) + %39 = call %Tuple* @__quantum__rt__tuple_copy(%Tuple* %36, i1 false) + %options = bitcast %Tuple* %39 to { double, double, i64, i64, i64, i64, double, i64, %Callable* }* + %40 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 8 + %41 = load %Callable*, %Callable** %40, align 8 + %42 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Message__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + store %Callable* %42, %Callable** %40, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %42, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %42, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %39, i32 1) + %43 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([16 x i8], [16 x i8]* @0, i32 0, i32 0)) + call void @__quantum__rt__message(%String* %43) + %44 = call %Array* @Microsoft__Quantum__Samples__ClassifierStructure__body() + %45 = call { %Array*, %Array*, double }* @Microsoft__Quantum__MachineLearning__SequentialModel__body(%Array* %44, %Array* %startPoint, double 0.000000e+00) + %46 = call { %Array* }* @Microsoft__Quantum__Samples__DefaultSchedule__body(%Array* %trainingVectors) + %47 = call { %Array* }* @Microsoft__Quantum__Samples__DefaultSchedule__body(%Array* %trainingVectors) + %48 = call { { %Array*, %Array*, double }*, i64 }* @Microsoft__Quantum__MachineLearning__TrainSequentialClassifierAtModel__body({ %Array*, %Array*, double }* %45, %Array* %samples, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, { %Array* }* %46, { %Array* }* %47) + %49 = getelementptr inbounds { { %Array*, %Array*, double }*, i64 }, { { %Array*, %Array*, double }*, i64 }* %48, i32 0, i32 0 + %optimizedModel = load { %Array*, %Array*, double }*, { 
%Array*, %Array*, double }** %49, align 8 + %50 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %optimizedModel, i32 0, i32 0 + %51 = load %Array*, %Array** %50, align 8 + %52 = call i64 @__quantum__rt__array_get_size_1d(%Array* %51) + %53 = sub i64 %52, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %54 = phi i64 [ 0, %exit__2 ], [ %65, %exiting__3 ] + %55 = icmp sle i64 %54, %53 + br i1 %55, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 %54) + %57 = bitcast i8* %56 to { { i64, %Array* }*, i2, i64 }** + %58 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %57, align 8 + %59 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %58, i32 0, i32 0 + %60 = load { i64, %Array* }*, { i64, %Array* }** %59, align 8 + %61 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %60, i32 0, i32 1 + %62 = load %Array*, %Array** %61, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %62, i32 1) + %63 = bitcast { i64, %Array* }* %60 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %63, i32 1) + %64 = bitcast { { i64, %Array* }*, i2, i64 }* %58 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %64, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %65 = add i64 %54, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %51, i32 1) + %66 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %optimizedModel, i32 0, i32 1 + %67 = load %Array*, %Array** %66, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %67, i32 1) + %68 = bitcast { %Array*, %Array*, double }* %optimizedModel to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %68, i32 1) + %69 = getelementptr inbounds { { %Array*, %Array*, double }*, i64 }, { { %Array*, %Array*, double }*, i64 }* %48, i32 0, i32 1 + %nMisses = load i64, i64* %69, align 4 + call void @__quantum__rt__array_update_reference_count(%Array* %67, i32 1) + %70 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %optimizedModel, i32 0, i32 2 + %71 = load double, double* %70, align 8 + %72 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, i64 }* getelementptr ({ %Array*, double, i64 }, { %Array*, double, i64 }* null, i32 1) to i64)) + %73 = bitcast %Tuple* %72 to { %Array*, double, i64 }* + %74 = getelementptr inbounds { %Array*, double, i64 }, { %Array*, double, i64 }* %73, i32 0, i32 0 + %75 = getelementptr inbounds { %Array*, double, i64 }, { %Array*, double, i64 }* %73, i32 0, i32 1 + %76 = getelementptr inbounds { %Array*, double, i64 }, { %Array*, double, i64 }* %73, i32 0, i32 2 + store %Array* %67, %Array** %74, align 8 + store double %71, double* %75, align 8 + store i64 %nMisses, i64* %76, align 4 + %77 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %45, i32 0, i32 0 + %78 = load %Array*, %Array** %77, align 8 + %79 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %45, i32 0, i32 1 + %80 = load %Array*, %Array** %79, align 8 + %81 = getelementptr inbounds { %Array* }, { %Array* }* %46, i32 0, i32 0 + %82 = load %Array*, %Array** %81, align 8 + %83 = getelementptr inbounds { %Array* }, { %Array* }* %47, i32 0, i32 
0 + %84 = load %Array*, %Array** %83, align 8 + %85 = sub i64 %0, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %86 = phi i64 [ 0, %exit__3 ], [ %91, %exiting__4 ] + %87 = icmp sle i64 %86, %85 + br i1 %87, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %trainingVectors, i64 %86) + %89 = bitcast i8* %88 to %Array** + %90 = load %Array*, %Array** %89, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %90, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %91 = add i64 %86, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %trainingVectors, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %trainingLabels, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %startPoint, i32 -1) + %92 = sub i64 %11, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %93 = phi i64 [ 0, %exit__4 ], [ %101, %exiting__5 ] + %94 = icmp sle i64 %93, %92 + br i1 %94, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %95 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %93) + %96 = bitcast i8* %95 to { %Array*, i64 }** + %97 = load { %Array*, i64 }*, { %Array*, i64 }** %96, align 8 + %98 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %97, i32 0, i32 0 + %99 = load %Array*, %Array** %98, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %99, i32 -1) + %100 = bitcast { %Array*, i64 }* %97 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %100, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %101 = add i64 %93, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %42, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %42, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %39, i32 -1) + %102 = sub i64 %52, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %103 = phi i64 [ 0, %exit__5 ], [ %114, %exiting__6 ] + %104 = icmp sle i64 %103, %102 + br i1 %104, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %105 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 %103) + %106 = bitcast i8* %105 to { { i64, %Array* }*, i2, i64 }** + %107 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %106, align 8 + %108 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %107, i32 0, i32 0 + %109 = load { i64, %Array* }*, { i64, %Array* }** %108, align 8 + %110 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %109, i32 0, i32 1 + %111 = load %Array*, %Array** %110, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %111, i32 -1) + %112 = bitcast { i64, %Array* }* %109 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %112, i32 -1) + %113 = bitcast { { i64, %Array* }*, i2, i64 }* %107 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %113, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %114 = add i64 %103, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %51, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %67, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %68, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + %115 = call i64 @__quantum__rt__array_get_size_1d(%Array* %9) + %116 = sub i64 %115, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %117 = phi i64 [ 0, %exit__6 ], [ %122, %exiting__7 ] + %118 = icmp sle i64 %117, %116 + br i1 %118, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %119 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %9, i64 %117) + %120 = bitcast i8* %119 to %Array** + %121 = load %Array*, %Array** %120, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %121, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %122 = add i64 %117, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + %123 = call i64 @__quantum__rt__array_get_size_1d(%Array* %10) + %124 = sub i64 %123, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %125 = phi i64 [ 0, %exit__7 ], [ %133, %exiting__8 ] + %126 = icmp sle i64 %125, %124 + br i1 %126, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %127 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %125) + %128 = bitcast i8* %127 to { %Array*, i64 }** + %129 = load { %Array*, i64 }*, { %Array*, i64 }** %128, align 8 + %130 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %129, i32 0, i32 0 + %131 = load %Array*, %Array** %130, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %131, i32 -1) + %132 = bitcast { %Array*, i64 }* %129 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %132, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %133 = add i64 %125, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 -1) + %134 = sub i64 %11, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %135 = phi i64 [ 0, %exit__8 ], [ %143, %exiting__9 ] + %136 = icmp sle i64 %135, %134 + br i1 %136, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %137 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %135) + %138 = bitcast i8* %137 to { %Array*, i64 }** + %139 = load { %Array*, i64 }*, { %Array*, i64 }** %138, align 8 + %140 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %139, i32 0, i32 0 + %141 = load %Array*, %Array** %140, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %141, i32 -1) + %142 = bitcast { %Array*, i64 }* %139 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %142, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %143 = add i64 %135, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_reference_count(%Array* %samples, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %41, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %41, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %42, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %42, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %39, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %43, i32 -1) + %144 = call i64 @__quantum__rt__array_get_size_1d(%Array* %44) + %145 = sub i64 %144, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %146 = phi i64 [ 0, %exit__9 ], [ %157, %exiting__10 ] + %147 = icmp sle i64 %146, %145 + br i1 %147, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %148 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 %146) + %149 = bitcast i8* %148 to { { i64, %Array* }*, i2, i64 }** + %150 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %149, align 8 + %151 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %150, i32 0, i32 0 + %152 = load { i64, %Array* }*, { i64, %Array* }** %151, align 8 + %153 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %152, i32 0, i32 1 + %154 = load %Array*, %Array** %153, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %154, i32 -1) + %155 = bitcast { i64, %Array* }* %152 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %155, i32 -1) + %156 = bitcast { { i64, %Array* }*, i2, i64 }* %150 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %156, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %157 = add i64 %146, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 -1) + %158 = call i64 @__quantum__rt__array_get_size_1d(%Array* %78) + %159 = sub i64 %158, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %160 = phi i64 [ 0, %exit__10 ], [ %171, %exiting__11 ] + %161 = icmp sle i64 %160, %159 + br i1 %161, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %162 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 %160) + %163 = bitcast i8* %162 to { { i64, %Array* }*, i2, i64 }** + %164 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %163, align 8 + %165 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %164, i32 0, i32 0 + %166 = load { i64, %Array* }*, { i64, %Array* }** %165, align 8 + %167 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %166, i32 0, i32 1 + %168 = load %Array*, %Array** %167, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %168, i32 -1) + %169 = bitcast { i64, %Array* }* %166 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %169, i32 -1) + %170 = bitcast { { i64, %Array* }*, i2, i64 }* %164 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %170, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %171 = add i64 %160, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_reference_count(%Array* %78, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %80, i32 -1) + %172 = bitcast { %Array*, %Array*, double }* %45 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %172, i32 -1) + %173 = call i64 @__quantum__rt__array_get_size_1d(%Array* %82) + %174 = sub i64 %173, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %175 = phi i64 [ 0, 
%exit__11 ], [ %180, %exiting__12 ] + %176 = icmp sle i64 %175, %174 + br i1 %176, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %177 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %82, i64 %175) + %178 = bitcast i8* %177 to %Range* + %179 = load %Range, %Range* %178, align 4 + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %180 = add i64 %175, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_reference_count(%Array* %82, i32 -1) + %181 = bitcast { %Array* }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %181, i32 -1) + %182 = call i64 @__quantum__rt__array_get_size_1d(%Array* %84) + %183 = sub i64 %182, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %184 = phi i64 [ 0, %exit__12 ], [ %189, %exiting__13 ] + %185 = icmp sle i64 %184, %183 + br i1 %185, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %186 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %84, i64 %184) + %187 = bitcast i8* %186 to %Range* + %188 = load %Range, %Range* %187, align 4 + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %189 = add i64 %184, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_reference_count(%Array* %84, i32 -1) + %190 = bitcast { %Array* }* %47 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %190, i32 -1) + %191 = sub i64 %52, 1 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %192 = phi i64 [ 0, %exit__13 ], [ %203, %exiting__14 ] + %193 = icmp sle i64 %192, %191 + br i1 %193, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %194 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %51, i64 %192) + %195 = bitcast i8* %194 to { { i64, %Array* }*, i2, i64 }** + %196 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %195, align 8 + %197 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %196, i32 0, i32 0 + %198 = load { i64, %Array* }*, { i64, %Array* }** %197, align 8 + %199 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %198, i32 0, i32 1 + %200 = load %Array*, %Array** %199, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %200, i32 -1) + %201 = bitcast { i64, %Array* }* %198 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %201, i32 -1) + %202 = bitcast { { i64, %Array* }*, i2, i64 }* %196 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %202, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %203 = add i64 %192, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_reference_count(%Array* %51, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %67, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %68, i32 -1) + %204 = bitcast { { %Array*, %Array*, double }*, i64 }* %48 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %204, i32 -1) + ret { %Array*, double, i64 }* %73 +} + +define internal %Array* @Microsoft__Quantum__Arrays___c90f4b37e41846f5a59f0fb238007c41_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to { %Array*, i64 }** + %5 = load { %Array*, i64 }*, { %Array*, i64 }** %4, align 8 + %6 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %7, i32 1) + %8 = bitcast { %Array*, i64 }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %10 = icmp eq i64 %length, 0 + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %12 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %14 = bitcast i8* %13 to { %Array*, i64 }** + %15 = load { %Array*, i64 }*, { %Array*, i64 }** %14, align 8 + %16 = bitcast { %Array*, i64 }* %15 to %Tuple* + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, i64 }* }* getelementptr ({ { %Array*, i64 }* }, { { %Array*, i64 }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %16, %Tuple* %17) + %18 = bitcast %Tuple* %17 to { { %Array*, i64 }* }* + %19 = getelementptr inbounds { { %Array*, i64 }* }, { { %Array*, i64 }* }* %18, i32 0, i32 0 + %first = load { %Array*, i64 }*, { %Array*, i64 }** %19, align 8 + %20 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %first, i32 0, i32 0 + %21 = load %Array*, %Array** %20, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 1) + %22 = bitcast { %Array*, i64 }* %first to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 1) + %23 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %24 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %25 = phi i64 [ 0, %then0__1 ], [ %33, %exiting__2 ] + %26 = icmp sle i64 %25, %12 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %25) + %28 = bitcast i8* %27 to { %Array*, i64 }** + %29 = load { %Array*, i64 }*, { %Array*, i64 }** %28, align 8 + %30 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %29, i32 0, i32 0 + %31 = load %Array*, %Array** %30, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %31, i32 -1) + %32 = bitcast { %Array*, i64 }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %32, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %33 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = 
%header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %11 + +header__3: ; preds = %exiting__3, %continue__1 + %34 = phi i64 [ 0, %continue__1 ], [ %39, %exiting__3 ] + %35 = icmp sle i64 %34, %24 + br i1 %35, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %34) + %37 = bitcast i8* %36 to { %Array*, i64 }** + store { %Array*, i64 }* %first, { %Array*, i64 }** %37, align 8 + %38 = load %Array*, %Array** %20, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %38, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %39 = add i64 %34, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %23, %Array** %retval, align 8 + %40 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %41 = phi i64 [ 0, %exit__3 ], [ %49, %exiting__4 ] + %42 = icmp sle i64 %41, %40 + br i1 %42, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %41) + %44 = bitcast i8* %43 to { %Array*, i64 }** + %45 = load { %Array*, i64 }*, { %Array*, i64 }** %44, align 8 + %46 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %45, i32 0, i32 0 + %47 = load %Array*, %Array** %46, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + %48 = bitcast { %Array*, i64 }* %45 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %48, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %49 = add i64 %41, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %50 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idx = phi i64 [ 1, %exit__4 ], [ %71, %exiting__5 ] + %51 = icmp sle i64 %idx, %50 + br i1 %51, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %52 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %52, i32 -1) + %53 = call %Array* @__quantum__rt__array_copy(%Array* %52, i1 false) + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %55 = bitcast i8* %54 to { %Array*, i64 }** + %56 = load { %Array*, i64 }*, { %Array*, i64 }** %55, align 8 + %57 = bitcast { %Array*, i64 }* %56 to %Tuple* + %58 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, i64 }* }* getelementptr ({ { %Array*, i64 }* }, { { %Array*, i64 }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %57, %Tuple* %58) + %59 = bitcast %Tuple* %58 to { { %Array*, i64 }* }* + %60 = getelementptr inbounds { { %Array*, i64 }* }, { { %Array*, i64 }* }* %59, i32 0, i32 0 + %61 = load { %Array*, i64 }*, { %Array*, i64 }** %60, align 8 + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 %idx) + %63 = bitcast i8* %62 to { %Array*, i64 }** + %64 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %61, i32 0, i32 0 + %65 = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 1) + %66 = bitcast { %Array*, i64 }* %61 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %66, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* 
%65, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %66, i32 1) + %67 = load { %Array*, i64 }*, { %Array*, i64 }** %63, align 8 + %68 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %67, i32 0, i32 0 + %69 = load %Array*, %Array** %68, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %69, i32 -1) + %70 = bitcast { %Array*, i64 }* %67 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %70, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %70, i32 -1) + store { %Array*, i64 }* %61, { %Array*, i64 }** %63, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %53, i32 1) + store %Array* %53, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %52, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %66, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %58, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %71 = add i64 %idx, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %72 = load %Array*, %Array** %retval, align 8 + %73 = load %Array*, %Array** %20, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %74 = sub i64 %length, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %75 = phi i64 [ 0, %exit__5 ], [ %83, %exiting__6 ] + %76 = icmp sle i64 %75, %74 + br i1 %76, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %77 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %75) + %78 = bitcast i8* %77 to { %Array*, i64 }** + %79 = load { %Array*, i64 }*, { %Array*, i64 }** %78, align 8 + %80 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %79, i32 0, i32 0 + %81 = load %Array*, %Array** %80, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 -1) + %82 = bitcast { %Array*, i64 }* %79 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %82, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %83 = add i64 %75, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %73, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 -1) + %84 = call i64 @__quantum__rt__array_get_size_1d(%Array* %72) + %85 = sub i64 %84, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %86 = phi i64 [ 0, %exit__6 ], [ %94, %exiting__7 ] + %87 = icmp sle i64 %86, %85 + br i1 %87, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %72, i64 %86) + %89 = bitcast i8* %88 to { %Array*, i64 }** + %90 = load { %Array*, i64 }*, { %Array*, i64 }** %89, align 8 + %91 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %90, i32 0, i32 0 + %92 = load %Array*, %Array** %91, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %92, i32 -1) + %93 = bitcast { %Array*, i64 }* %90 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %93, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %94 
= add i64 %86, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %73, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + ret %Array* %72 +} + +define internal void @Microsoft__Quantum__MachineLearning__LabeledSample__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, i64 }* + %1 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load i64, i64* %2, align 4 + %5 = call { %Array*, i64 }* @Microsoft__Quantum__MachineLearning__LabeledSample__body(%Array* %3, i64 %4) + %6 = bitcast %Tuple* %result-tuple to { { %Array*, i64 }* }* + %7 = getelementptr inbounds { { %Array*, i64 }* }, { { %Array*, i64 }* }* %6, i32 0, i32 0 + store { %Array*, i64 }* %5, { %Array*, i64 }** %7, align 8 + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___23e2330a73974b3abb47f8506d246967_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %9 = icmp slt i64 %0, %8 + br i1 %9, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__1 + br label %condContinue__1 + +condFalse__1: ; preds = %exit__1 + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %8, %condFalse__1 ] + %10 = icmp eq i64 %nElements, 0 + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + %12 = sub i64 %0, 1 + br label %header__2 + +continue__1: ; preds = %condContinue__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %14 = bitcast i8* %13 to %Array** + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 1) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %17 = bitcast i8* %16 to i64* + %18 = load i64, i64* %17, align 4 + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, i64 }* getelementptr ({ %Array*, i64 }, { %Array*, i64 }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { %Array*, i64 }* + %21 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %20, i32 0, i32 0 + 
%22 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %20, i32 0, i32 1 + store %Array* %15, %Array** %21, align 8 + store i64 %18, i64* %22, align 4 + %23 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %24 = sub i64 %nElements, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %25 = phi i64 [ 0, %then0__1 ], [ %30, %exiting__2 ] + %26 = icmp sle i64 %25, %12 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %25) + %28 = bitcast i8* %27 to %Array** + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %11 + +header__3: ; preds = %exiting__3, %continue__1 + %31 = phi i64 [ 0, %continue__1 ], [ %36, %exiting__3 ] + %32 = icmp sle i64 %31, %24 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %31) + %34 = bitcast i8* %33 to { %Array*, i64 }** + store { %Array*, i64 }* %20, { %Array*, i64 }** %34, align 8 + %35 = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %35, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %36 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %23, %Array** %output, align 8 + %37 = sub i64 %nElements, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %38 = phi i64 [ 0, %exit__3 ], [ %46, %exiting__4 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %38) + %41 = bitcast i8* %40 to { %Array*, i64 }** + %42 = load { %Array*, i64 }*, { %Array*, i64 }** %41, align 8 + %43 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %42, i32 0, i32 0 + %44 = load %Array*, %Array** %43, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 1) + %45 = bitcast { %Array*, i64 }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %46 = add i64 %38, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %47 = sub i64 %nElements, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idxElement = phi i64 [ 1, %exit__4 ], [ %67, %exiting__5 ] + %48 = icmp sle i64 %idxElement, %47 + br i1 %48, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + %50 = call %Array* @__quantum__rt__array_copy(%Array* %49, i1 false) + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %52 = bitcast i8* %51 to %Array** + %53 = load %Array*, %Array** %52, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 1) + %54 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %55 = bitcast i8* %54 to i64* + %56 = load i64, i64* %55, align 4 + %57 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, i64 }* getelementptr ({ %Array*, i64 }, { %Array*, i64 }* null, i32 1) to i64)) + %58 = bitcast %Tuple* %57 to { %Array*, i64 }* + %59 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %58, i32 0, i32 0 + %60 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %58, i32 0, i32 1 + store %Array* %53, %Array** %59, align 8 + store i64 %56, i64* %60, align 4 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %50, i64 %idxElement) + %62 = bitcast i8* %61 to { %Array*, i64 }** + call void @__quantum__rt__array_update_alias_count(%Array* %53, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 1) + %63 = load { %Array*, i64 }*, { %Array*, i64 }** %62, align 8 + %64 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %63, i32 0, i32 0 + %65 = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 -1) + %66 = bitcast { %Array*, i64 }* %63 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %66, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %66, i32 -1) + store { %Array*, i64 }* %58, { %Array*, i64 }** %62, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 1) + store %Array* %50, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %67 = add i64 %idxElement, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %68 = load %Array*, %Array** %output, align 8 + %69 = load %Array*, %Array** %21, align 8 + %70 = sub i64 %0, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %71 = phi i64 [ 0, %exit__5 ], [ %76, %exiting__6 ] + %72 = icmp sle i64 %71, %70 + br i1 %72, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %73 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %71) + %74 = bitcast i8* %73 to %Array** + %75 = load %Array*, %Array** %74, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %75, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %76 = add i64 %71, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %77 = call i64 @__quantum__rt__array_get_size_1d(%Array* %68) + %78 = sub i64 %77, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %79 = phi i64 [ 0, %exit__6 ], [ %87, %exiting__7 ] + %80 = icmp sle i64 %79, %78 + br i1 %80, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %68, i64 %79) + %82 = bitcast i8* %81 to { %Array*, i64 }** + %83 = load { %Array*, i64 }*, { %Array*, i64 }** %82, align 8 + %84 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %83, i32 0, i32 0 + %85 = load %Array*, %Array** %84, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %85, i32 -1) + %86 = bitcast { %Array*, i64 }* %83 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + br label %exiting__7 + 
+exiting__7: ; preds = %body__7 + %87 = add i64 %79, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %68, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + ret %Array* %68 +} + +define internal { double, double, i64, i64, i64, i64, double, i64, %Callable* }* @Microsoft__Quantum__MachineLearning__DefaultTrainingOptions__body() { +entry: + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___9e001bb7f66049e0a843d161f2c1deac_Ignore__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call { double, double, i64, i64, i64, i64, double, i64, %Callable* }* @Microsoft__Quantum__MachineLearning__TrainingOptions__body(double 1.000000e-01, double 5.000000e-03, i64 15, i64 10000, i64 16, i64 8, double 1.000000e-02, i64 1, %Callable* %0) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + ret { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %1 +} + +declare %Tuple* @__quantum__rt__tuple_copy(%Tuple*, i1) + +define internal void @Microsoft__Quantum__Intrinsic__Message__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %String* }* + %1 = getelementptr inbounds { %String* }, { %String* }* %0, i32 0, i32 0 + %2 = load %String*, %String** %1, align 8 + call void @__quantum__rt__message(%String* %2) + ret void +} + +declare %String* @__quantum__rt__string_create(i8*) + +declare void @__quantum__rt__message(%String*) + +define internal { { %Array*, %Array*, double }*, i64 }* @Microsoft__Quantum__MachineLearning__TrainSequentialClassifierAtModel__body({ %Array*, %Array*, double }* %model, %Array* %samples, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, { %Array* }* %trainingSchedule, { %Array* }* %validationSchedule) { +entry: + %0 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { { i64, %Array* }*, i2, i64 }** + %8 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %7, align 8 + %9 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %8, i32 0, i32 0 + %10 = load { i64, %Array* }*, { i64, %Array* }** %9, align 8 + %11 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %10, i32 0, i32 1 + %12 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = bitcast { i64, %Array* }* %10 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %13, i32 1) + %14 = bitcast { { i64, %Array* }*, i2, i64 }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %4, 1 + br label 
%header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array*, %Array*, double }* %model to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + %19 = call i64 @__quantum__rt__array_get_size_1d(%Array* %samples) + %20 = sub i64 %19, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %21) + %24 = bitcast i8* %23 to { %Array*, i64 }** + %25 = load { %Array*, i64 }*, { %Array*, i64 }** %24, align 8 + %26 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %25, i32 0, i32 0 + %27 = load %Array*, %Array** %26, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 1) + %28 = bitcast { %Array*, i64 }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 1) + %30 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 8 + %31 = load %Callable*, %Callable** %30, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %31, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %31, i32 1) + %32 = bitcast { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %32, i32 1) + %33 = getelementptr inbounds { %Array* }, { %Array* }* %trainingSchedule, i32 0, i32 0 + %34 = load %Array*, %Array** %33, align 8 + %35 = call i64 @__quantum__rt__array_get_size_1d(%Array* %34) + %36 = sub i64 %35, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %37 = phi i64 [ 0, %exit__2 ], [ %42, %exiting__3 ] + %38 = icmp sle i64 %37, %36 + br i1 %38, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 %37) + %40 = bitcast i8* %39 to %Range* + %41 = load %Range, %Range* %40, align 4 + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %42 = add i64 %37, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 1) + %43 = bitcast { %Array* }* %trainingSchedule to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 1) + %44 = getelementptr inbounds { %Array* }, { %Array* }* %validationSchedule, i32 0, i32 0 + %45 = load %Array*, %Array** %44, align 8 + %46 = call i64 @__quantum__rt__array_get_size_1d(%Array* %45) + %47 = sub i64 %46, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %48 = phi i64 [ 0, %exit__3 ], [ %53, %exiting__4 ] + %49 = icmp sle i64 %48, %47 + br i1 %49, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %50 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %45, i64 %48) + %51 = bitcast i8* %50 to %Range* + %52 = load %Range, %Range* %51, align 4 + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %53 = add i64 %48, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %45, i32 1) + %54 = bitcast { %Array* }* %validationSchedule to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %54, i32 1) + %optimizedModel = call { %Array*, %Array*, double }* @Microsoft__Quantum__MachineLearning____QsRef0___TrainSequentialClassifierAtModel____body({ %Array*, %Array*, double }* %model, %Array* %samples, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, { %Array* }* %trainingSchedule) + %55 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %optimizedModel, i32 0, i32 0 + %56 = load %Array*, %Array** %55, align 8 + %57 = call i64 @__quantum__rt__array_get_size_1d(%Array* %56) + %58 = sub i64 %57, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %59 = phi i64 [ 0, %exit__4 ], [ %70, %exiting__5 ] + %60 = icmp sle i64 %59, %58 + br i1 %60, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 %59) + %62 = bitcast i8* %61 to { { i64, %Array* }*, i2, i64 }** + %63 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %62, align 8 + %64 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %63, i32 0, i32 0 + %65 = load { i64, %Array* }*, { i64, %Array* }** %64, align 8 + %66 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %65, i32 0, i32 1 + %67 = load %Array*, %Array** %66, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %67, i32 1) + %68 = bitcast { i64, %Array* }* %65 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %68, i32 1) + %69 = bitcast { { i64, %Array* }*, i2, i64 }* %63 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %69, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %70 = add i64 %59, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 1) + %71 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %optimizedModel, i32 0, i32 1 + %72 = load %Array*, %Array** %71, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 1) + %73 = bitcast { %Array*, %Array*, double }* %optimizedModel to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %73, i32 1) + %74 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning___Label__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %labels = call %Array* @Microsoft__Quantum__Arrays___f34491685bf044f1939458f941be92ef_Mapped__body(%Callable* %74, %Array* %samples) + call void @__quantum__rt__array_update_alias_count(%Array* %labels, i32 1) + %75 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning___Features__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %features = call %Array* @Microsoft__Quantum__Arrays___458019d5b77947a88477997a20fc14c5_Mapped__body(%Callable* %75, %Array* %samples) + %76 = call i64 @__quantum__rt__array_get_size_1d(%Array* 
%features) + %77 = sub i64 %76, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %78 = phi i64 [ 0, %exit__5 ], [ %83, %exiting__6 ] + %79 = icmp sle i64 %78, %77 + br i1 %79, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %features, i64 %78) + %81 = bitcast i8* %80 to %Array** + %82 = load %Array*, %Array** %81, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %82, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %83 = add i64 %78, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %features, i32 1) + %84 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 1 + %85 = load double, double* %84, align 8 + %86 = call %Array* @Microsoft__Quantum__MachineLearning___6dd27c99de61421cb8da3bf3154034a7_Sampled__body({ %Array* }* %validationSchedule, %Array* %features) + %87 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 3 + %88 = load i64, i64* %87, align 4 + %probabilities = call %Array* @Microsoft__Quantum__MachineLearning__EstimateClassificationProbabilities__body(double %85, { %Array*, %Array*, double }* %optimizedModel, %Array* %86, i64 %88) + call void @__quantum__rt__array_update_alias_count(%Array* %probabilities, i32 1) + %89 = call %Array* @Microsoft__Quantum__MachineLearning___9df2eba66a764c8abe9e53ac519cccaa_Sampled__body({ %Array* }* %validationSchedule, %Array* %labels) + %90 = call %Array* @Microsoft__Quantum__Arrays___e71c6b9cbb804917a4d7cd04011f2188_Zipped__body(%Array* %probabilities, %Array* %89) + %localBias = call double @Microsoft__Quantum__MachineLearning____QsRef0__UpdatedBias____body(%Array* %90, double 0.000000e+00, double %85) + %localPL = call %Array* @Microsoft__Quantum__MachineLearning__InferredLabels__body(double %localBias, %Array* %probabilities) + call void @__quantum__rt__array_update_alias_count(%Array* %localPL, i32 1) + %91 = call %Array* @Microsoft__Quantum__MachineLearning___9df2eba66a764c8abe9e53ac519cccaa_Sampled__body({ %Array* }* %validationSchedule, %Array* %labels) + %localMisses = call i64 @Microsoft__Quantum__MachineLearning__NMisclassifications__body(%Array* %localPL, %Array* %91) + %92 = call %Tuple* @__quantum__rt__tuple_copy(%Tuple* %73, i1 false) + %93 = bitcast %Tuple* %92 to { %Array*, %Array*, double }* + %94 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %93, i32 0, i32 2 + store double %localBias, double* %94, align 8 + %95 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %93, i32 0, i32 0 + %96 = load %Array*, %Array** %95, align 8 + %97 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %93, i32 0, i32 1 + %98 = load %Array*, %Array** %97, align 8 + %99 = call i64 @__quantum__rt__array_get_size_1d(%Array* %96) + %100 = sub i64 %99, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %101 = phi i64 [ 0, %exit__6 ], [ %112, %exiting__7 ] + %102 = icmp sle i64 %101, %100 + br i1 %102, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %96, i64 %101) + %104 = bitcast i8* %103 to { { i64, 
%Array* }*, i2, i64 }** + %105 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %104, align 8 + %106 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %105, i32 0, i32 0 + %107 = load { i64, %Array* }*, { i64, %Array* }** %106, align 8 + %108 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %107, i32 0, i32 1 + %109 = load %Array*, %Array** %108, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %109, i32 1) + %110 = bitcast { i64, %Array* }* %107 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %110, i32 1) + %111 = bitcast { { i64, %Array* }*, i2, i64 }* %105 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %111, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %112 = add i64 %101, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %96, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %98, i32 1) + %113 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, %Array*, double }*, i64 }* getelementptr ({ { %Array*, %Array*, double }*, i64 }, { { %Array*, %Array*, double }*, i64 }* null, i32 1) to i64)) + %114 = bitcast %Tuple* %113 to { { %Array*, %Array*, double }*, i64 }* + %115 = getelementptr inbounds { { %Array*, %Array*, double }*, i64 }, { { %Array*, %Array*, double }*, i64 }* %114, i32 0, i32 0 + %116 = getelementptr inbounds { { %Array*, %Array*, double }*, i64 }, { { %Array*, %Array*, double }*, i64 }* %114, i32 0, i32 1 + store { %Array*, %Array*, double }* %93, { %Array*, %Array*, double }** %115, align 8 + store i64 %localMisses, i64* %116, align 4 + %117 = sub i64 %2, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %118 = phi i64 [ 0, %exit__7 ], [ %129, %exiting__8 ] + %119 = icmp sle i64 %118, %117 + br i1 %119, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %120 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %118) + %121 = bitcast i8* %120 to { { i64, %Array* }*, i2, i64 }** + %122 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %121, align 8 + %123 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %122, i32 0, i32 0 + %124 = load { i64, %Array* }*, { i64, %Array* }** %123, align 8 + %125 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %124, i32 0, i32 1 + %126 = load %Array*, %Array** %125, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %126, i32 -1) + %127 = bitcast { i64, %Array* }* %124 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %127, i32 -1) + %128 = bitcast { { i64, %Array* }*, i2, i64 }* %122 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %128, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %129 = add i64 %118, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + %130 = sub i64 %19, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %131 = phi i64 [ 0, %exit__8 ], [ %139, %exiting__9 ] + %132 = icmp sle i64 %131, %130 + br i1 %132, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %133 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %131) + %134 = bitcast i8* %133 to { %Array*, i64 }** + %135 = load { %Array*, i64 }*, { %Array*, i64 }** %134, align 8 + %136 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %135, i32 0, i32 0 + %137 = load %Array*, %Array** %136, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %137, i32 -1) + %138 = bitcast { %Array*, i64 }* %135 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %138, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %139 = add i64 %131, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %31, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %32, i32 -1) + %140 = sub i64 %35, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %141 = phi i64 [ 0, %exit__9 ], [ %146, %exiting__10 ] + %142 = icmp sle i64 %141, %140 + br i1 %142, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %143 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 %141) + %144 = bitcast i8* %143 to %Range* + %145 = load %Range, %Range* %144, align 4 + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %146 = add i64 %141, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 -1) + %147 = sub i64 %46, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %148 = phi i64 [ 0, %exit__10 ], [ %153, %exiting__11 ] + %149 = icmp sle i64 %148, %147 + br i1 %149, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %150 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %45, i64 %148) + %151 = bitcast i8* %150 to %Range* + %152 = load %Range, %Range* %151, align 4 + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %153 = add i64 %148, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %45, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %54, i32 -1) + %154 = sub i64 %57, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %155 = phi i64 [ 0, %exit__11 ], [ %166, %exiting__12 ] + %156 = icmp sle i64 %155, %154 + br i1 %156, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %157 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 %155) + %158 = bitcast i8* %157 to { { i64, %Array* }*, i2, i64 }** + %159 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %158, align 8 + %160 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %159, i32 0, i32 0 + %161 = load { i64, %Array* }*, { i64, %Array* }** %160, align 8 + %162 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %161, i32 0, i32 1 + %163 = load %Array*, %Array** %162, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %163, i32 -1) + %164 = bitcast { i64, %Array* }* %161 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %164, i32 -1) + %165 = bitcast { { i64, %Array* }*, i2, i64 }* %159 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %165, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %166 = add i64 %155, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %73, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %labels, i32 -1) + %167 = sub i64 %76, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %168 = phi i64 [ 0, %exit__12 ], [ %173, %exiting__13 ] + %169 = icmp sle i64 %168, %167 + br i1 %169, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %170 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %features, i64 %168) + %171 = bitcast i8* %170 to %Array** + %172 = load %Array*, %Array** %171, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %172, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %173 = add i64 %168, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_alias_count(%Array* %features, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %probabilities, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %localPL, i32 -1) + %174 = sub i64 %57, 1 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %175 = phi i64 [ 0, %exit__13 ], [ %186, %exiting__14 ] + %176 = icmp sle i64 %175, %174 + br i1 %176, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %177 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %56, i64 %175) + %178 = bitcast i8* %177 to { { i64, %Array* }*, i2, i64 }** + %179 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %178, align 8 + %180 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %179, i32 0, i32 0 + %181 = load { i64, %Array* }*, { i64, %Array* }** %180, align 8 + %182 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %181, i32 0, i32 1 + %183 = load %Array*, %Array** %182, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %183, i32 -1) + %184 = bitcast { i64, %Array* }* %181 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %184, i32 -1) + %185 = bitcast { { i64, %Array* }*, i2, i64 }* %179 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %185, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %186 = add i64 %175, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_reference_count(%Array* %56, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %72, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %73, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %74, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %74, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %labels, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %75, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %75, i32 -1) + %187 = sub i64 %76, 1 + br label %header__15 + +header__15: ; preds = %exiting__15, %exit__14 + %188 = phi i64 [ 0, %exit__14 ], [ %193, %exiting__15 ] + %189 = icmp sle i64 
%188, %187 + br i1 %189, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %190 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %features, i64 %188) + %191 = bitcast i8* %190 to %Array** + %192 = load %Array*, %Array** %191, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %192, i32 -1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %193 = add i64 %188, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_reference_count(%Array* %features, i32 -1) + %194 = call i64 @__quantum__rt__array_get_size_1d(%Array* %86) + %195 = sub i64 %194, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %196 = phi i64 [ 0, %exit__15 ], [ %201, %exiting__16 ] + %197 = icmp sle i64 %196, %195 + br i1 %197, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %198 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %86, i64 %196) + %199 = bitcast i8* %198 to %Array** + %200 = load %Array*, %Array** %199, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %200, i32 -1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %201 = add i64 %196, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_reference_count(%Array* %86, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %probabilities, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %89, i32 -1) + %202 = call i64 @__quantum__rt__array_get_size_1d(%Array* %90) + %203 = sub i64 %202, 1 + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %204 = phi i64 [ 0, %exit__16 ], [ %210, %exiting__17 ] + %205 = icmp sle i64 %204, %203 + br i1 %205, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %206 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %90, i64 %204) + %207 = bitcast i8* %206 to { double, i64 }** + %208 = load { double, i64 }*, { double, i64 }** %207, align 8 + %209 = bitcast { double, i64 }* %208 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %209, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %210 = add i64 %204, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_reference_count(%Array* %90, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %localPL, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %91, i32 -1) + ret { { %Array*, %Array*, double }*, i64 }* %114 +} + +define internal { %Array*, %Array*, double }* @Microsoft__Quantum__MachineLearning__SequentialModel__body(%Array* %Structure, %Array* %Parameters, double %Bias) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %Structure) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %Structure, i64 %2) + %5 = bitcast i8* %4 to { { i64, %Array* }*, i2, i64 }** + %6 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %5, align 8 + %7 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %6, i32 0, i32 0 + %8 = load { i64, %Array* }*, { i64, %Array* }** %7, align 8 + %9 = getelementptr inbounds { 
i64, %Array* }, { i64, %Array* }* %8, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { i64, %Array* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = bitcast { { i64, %Array* }*, i2, i64 }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %Structure, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %Parameters, i32 1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, double }* getelementptr ({ %Array*, %Array*, double }, { %Array*, %Array*, double }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Array*, %Array*, double }* + %16 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %15, i32 0, i32 2 + store %Array* %Structure, %Array** %16, align 8 + store %Array* %Parameters, %Array** %17, align 8 + store double %Bias, double* %18, align 8 + %19 = call i64 @__quantum__rt__array_get_size_1d(%Array* %Structure) + %20 = sub i64 %19, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, %exit__1 ], [ %32, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %Structure, i64 %21) + %24 = bitcast i8* %23 to { { i64, %Array* }*, i2, i64 }** + %25 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %24, align 8 + %26 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %25, i32 0, i32 0 + %27 = load { i64, %Array* }*, { i64, %Array* }** %26, align 8 + %28 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %27, i32 0, i32 1 + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 1) + %30 = bitcast { i64, %Array* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 1) + %31 = bitcast { { i64, %Array* }*, i2, i64 }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %32 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %Structure, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %Parameters, i32 1) + %33 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %34 = phi i64 [ 0, %exit__2 ], [ %45, %exiting__3 ] + %35 = icmp sle i64 %34, %33 + br i1 %35, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %Structure, i64 %34) + %37 = bitcast i8* %36 to { { i64, %Array* }*, i2, i64 }** + %38 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %37, align 8 + %39 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %38, i32 0, 
i32 0 + %40 = load { i64, %Array* }*, { i64, %Array* }** %39, align 8 + %41 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %40, i32 0, i32 1 + %42 = load %Array*, %Array** %41, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 -1) + %43 = bitcast { i64, %Array* }* %40 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 -1) + %44 = bitcast { { i64, %Array* }*, i2, i64 }* %38 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %44, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %45 = add i64 %34, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %Structure, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %Parameters, i32 -1) + ret { %Array*, %Array*, double }* %15 +} + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) + +define internal { %Array*, i64 }* @Microsoft__Quantum__MachineLearning__LabeledSample__body(%Array* %Features, i64 %Label) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %Features, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, i64 }* getelementptr ({ %Array*, i64 }, { %Array*, i64 }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Array*, i64 }* + %2 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %1, i32 0, i32 1 + store %Array* %Features, %Array** %2, align 8 + store i64 %Label, i64* %3, align 4 + call void @__quantum__rt__array_update_reference_count(%Array* %Features, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %Features, i32 -1) + ret { %Array*, i64 }* %1 +} + +define double @Microsoft__Quantum__Samples__ValidateHalfMoonModel__body(%Array* %validationVectors, %Array* %validationLabels, %Array* %parameters, double %bias) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %validationVectors) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %validationVectors, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %validationVectors, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %validationLabels, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %parameters, i32 1) + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning__LabeledSample__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %9 = call %Array* @Microsoft__Quantum__Samples__Preprocessed__body(%Array* %validationVectors) + %10 = call %Array* @Microsoft__Quantum__Arrays___23e2330a73974b3abb47f8506d246967_Zipped__body(%Array* %9, %Array* %validationLabels) + %samples = call %Array* @Microsoft__Quantum__Arrays___c90f4b37e41846f5a59f0fb238007c41_Mapped__body(%Callable* %8, %Array* %10) + %11 = call i64 
@__quantum__rt__array_get_size_1d(%Array* %samples) + %12 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %13) + %16 = bitcast i8* %15 to { %Array*, i64 }** + %17 = load { %Array*, i64 }*, { %Array*, i64 }** %16, align 8 + %18 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %17, i32 0, i32 0 + %19 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 1) + %20 = bitcast { %Array*, i64 }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 1) + %22 = call %Array* @Microsoft__Quantum__Samples__ClassifierStructure__body() + %23 = call { %Array*, %Array*, double }* @Microsoft__Quantum__MachineLearning__SequentialModel__body(%Array* %22, %Array* %parameters, double %bias) + %24 = call { %Array* }* @Microsoft__Quantum__Samples__DefaultSchedule__body(%Array* %validationVectors) + %results = call { i64, i64 }* @Microsoft__Quantum__MachineLearning__ValidateSequentialClassifier__body({ %Array*, %Array*, double }* %23, %Array* %samples, double 5.000000e-03, i64 10000, { %Array* }* %24) + %25 = bitcast { i64, i64 }* %results to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 1) + %26 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %results, i32 0, i32 0 + %27 = load i64, i64* %26, align 4 + %28 = sitofp i64 %27 to double + %29 = sitofp i64 %11 to double + %30 = fdiv double %28, %29 + %31 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %23, i32 0, i32 0 + %32 = load %Array*, %Array** %31, align 8 + %33 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %23, i32 0, i32 1 + %34 = load %Array*, %Array** %33, align 8 + %35 = getelementptr inbounds { %Array* }, { %Array* }* %24, i32 0, i32 0 + %36 = load %Array*, %Array** %35, align 8 + %37 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %38 = phi i64 [ 0, %exit__2 ], [ %43, %exiting__3 ] + %39 = icmp sle i64 %38, %37 + br i1 %39, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %validationVectors, i64 %38) + %41 = bitcast i8* %40 to %Array** + %42 = load %Array*, %Array** %41, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %43 = add i64 %38, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %validationVectors, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %validationLabels, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %parameters, i32 -1) + %44 = sub i64 %11, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %45 = phi i64 [ 0, %exit__3 ], [ %53, %exiting__4 ] + %46 = icmp sle i64 %45, %44 + br i1 %46, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* 
%samples, i64 %45) + %48 = bitcast i8* %47 to { %Array*, i64 }** + %49 = load { %Array*, i64 }*, { %Array*, i64 }** %48, align 8 + %50 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %49, i32 0, i32 0 + %51 = load %Array*, %Array** %50, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %51, i32 -1) + %52 = bitcast { %Array*, i64 }* %49 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %52, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %53 = add i64 %45, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + %54 = call i64 @__quantum__rt__array_get_size_1d(%Array* %9) + %55 = sub i64 %54, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %56 = phi i64 [ 0, %exit__4 ], [ %61, %exiting__5 ] + %57 = icmp sle i64 %56, %55 + br i1 %57, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %58 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %9, i64 %56) + %59 = bitcast i8* %58 to %Array** + %60 = load %Array*, %Array** %59, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %61 = add i64 %56, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + %62 = call i64 @__quantum__rt__array_get_size_1d(%Array* %10) + %63 = sub i64 %62, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %64 = phi i64 [ 0, %exit__5 ], [ %72, %exiting__6 ] + %65 = icmp sle i64 %64, %63 + br i1 %65, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %64) + %67 = bitcast i8* %66 to { %Array*, i64 }** + %68 = load { %Array*, i64 }*, { %Array*, i64 }** %67, align 8 + %69 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %68, i32 0, i32 0 + %70 = load %Array*, %Array** %69, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %70, i32 -1) + %71 = bitcast { %Array*, i64 }* %68 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %71, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %72 = add i64 %64, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 -1) + %73 = sub i64 %11, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %74 = phi i64 [ 0, %exit__6 ], [ %82, %exiting__7 ] + %75 = icmp sle i64 %74, %73 + br i1 %75, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %74) + %77 = bitcast i8* %76 to { %Array*, i64 }** + %78 = load { %Array*, i64 }*, { %Array*, i64 }** %77, align 8 + %79 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %78, i32 0, i32 0 + %80 = load %Array*, %Array** %79, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %80, i32 -1) + %81 = bitcast { %Array*, i64 }* %78 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %81, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = 
%body__7 + %82 = add i64 %74, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %samples, i32 -1) + %83 = call i64 @__quantum__rt__array_get_size_1d(%Array* %22) + %84 = sub i64 %83, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %85 = phi i64 [ 0, %exit__7 ], [ %96, %exiting__8 ] + %86 = icmp sle i64 %85, %84 + br i1 %86, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %87 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %22, i64 %85) + %88 = bitcast i8* %87 to { { i64, %Array* }*, i2, i64 }** + %89 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %88, align 8 + %90 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %89, i32 0, i32 0 + %91 = load { i64, %Array* }*, { i64, %Array* }** %90, align 8 + %92 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %91, i32 0, i32 1 + %93 = load %Array*, %Array** %92, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %93, i32 -1) + %94 = bitcast { i64, %Array* }* %91 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %94, i32 -1) + %95 = bitcast { { i64, %Array* }*, i2, i64 }* %89 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %95, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %96 = add i64 %85, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %22, i32 -1) + %97 = call i64 @__quantum__rt__array_get_size_1d(%Array* %32) + %98 = sub i64 %97, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %99 = phi i64 [ 0, %exit__8 ], [ %110, %exiting__9 ] + %100 = icmp sle i64 %99, %98 + br i1 %100, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %99) + %102 = bitcast i8* %101 to { { i64, %Array* }*, i2, i64 }** + %103 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %102, align 8 + %104 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %103, i32 0, i32 0 + %105 = load { i64, %Array* }*, { i64, %Array* }** %104, align 8 + %106 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %105, i32 0, i32 1 + %107 = load %Array*, %Array** %106, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %107, i32 -1) + %108 = bitcast { i64, %Array* }* %105 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %108, i32 -1) + %109 = bitcast { { i64, %Array* }*, i2, i64 }* %103 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %109, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %110 = add i64 %99, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + %111 = bitcast { %Array*, %Array*, double }* %23 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %111, i32 -1) + %112 = call i64 @__quantum__rt__array_get_size_1d(%Array* %36) + %113 = sub i64 %112, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %114 = phi i64 [ 0, %exit__9 ], [ %119, %exiting__10 ] + %115 = icmp sle i64 %114, %113 + br i1 %115, label %body__10, label %exit__10 + +body__10: ; 
preds = %header__10 + %116 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %36, i64 %114) + %117 = bitcast i8* %116 to %Range* + %118 = load %Range, %Range* %117, align 4 + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %119 = add i64 %114, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_reference_count(%Array* %36, i32 -1) + %120 = bitcast { %Array* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %120, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + ret double %30 +} + +define internal { i64, i64 }* @Microsoft__Quantum__MachineLearning__ValidateSequentialClassifier__body({ %Array*, %Array*, double }* %model, %Array* %samples, double %tolerance, i64 %nMeasurements, { %Array* }* %validationSchedule) { +entry: + %0 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { { i64, %Array* }*, i2, i64 }** + %8 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %7, align 8 + %9 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %8, i32 0, i32 0 + %10 = load { i64, %Array* }*, { i64, %Array* }** %9, align 8 + %11 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %10, i32 0, i32 1 + %12 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = bitcast { i64, %Array* }* %10 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %13, i32 1) + %14 = bitcast { { i64, %Array* }*, i2, i64 }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array*, %Array*, double }* %model to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + %19 = call i64 @__quantum__rt__array_get_size_1d(%Array* %samples) + %20 = sub i64 %19, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %21) + %24 = bitcast i8* %23 to { %Array*, i64 }** + %25 = load { %Array*, i64 }*, { %Array*, i64 }** %24, align 8 + %26 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %25, i32 0, i32 0 + %27 = load %Array*, %Array** %26, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 1) + %28 = bitcast { %Array*, i64 }* %25 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 1) + %30 = getelementptr inbounds { %Array* }, { %Array* }* %validationSchedule, i32 0, i32 0 + %31 = load %Array*, %Array** %30, align 8 + %32 = call i64 @__quantum__rt__array_get_size_1d(%Array* %31) + %33 = sub i64 %32, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %34 = phi i64 [ 0, %exit__2 ], [ %39, %exiting__3 ] + %35 = icmp sle i64 %34, %33 + br i1 %35, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %31, i64 %34) + %37 = bitcast i8* %36 to %Range* + %38 = load %Range, %Range* %37, align 4 + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %39 = add i64 %34, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %31, i32 1) + %40 = bitcast { %Array* }* %validationSchedule to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %41 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning___Features__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %features = call %Array* @Microsoft__Quantum__Arrays___458019d5b77947a88477997a20fc14c5_Mapped__body(%Callable* %41, %Array* %samples) + %42 = call i64 @__quantum__rt__array_get_size_1d(%Array* %features) + %43 = sub i64 %42, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %44 = phi i64 [ 0, %exit__3 ], [ %49, %exiting__4 ] + %45 = icmp sle i64 %44, %43 + br i1 %45, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %features, i64 %44) + %47 = bitcast i8* %46 to %Array** + %48 = load %Array*, %Array** %47, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %48, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %49 = add i64 %44, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %features, i32 1) + %50 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning___Label__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %51 = call %Array* @Microsoft__Quantum__Arrays___f34491685bf044f1939458f941be92ef_Mapped__body(%Callable* %50, %Array* %samples) + %labels = call %Array* @Microsoft__Quantum__MachineLearning___9df2eba66a764c8abe9e53ac519cccaa_Sampled__body({ %Array* }* %validationSchedule, %Array* %51) + call void @__quantum__rt__array_update_alias_count(%Array* %labels, i32 1) + %52 = call %Array* @Microsoft__Quantum__MachineLearning___6dd27c99de61421cb8da3bf3154034a7_Sampled__body({ %Array* }* %validationSchedule, %Array* %features) + %probabilities = call %Array* @Microsoft__Quantum__MachineLearning__EstimateClassificationProbabilities__body(double %tolerance, { %Array*, %Array*, double }* %model, %Array* %52, i64 %nMeasurements) + call void @__quantum__rt__array_update_alias_count(%Array* %probabilities, i32 1) + %53 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 2 + %54 = load double, double* %53, align 8 + %localPL = call %Array* 
@Microsoft__Quantum__MachineLearning__InferredLabels__body(double %54, %Array* %probabilities) + call void @__quantum__rt__array_update_alias_count(%Array* %localPL, i32 1) + %nMisclassifications = call i64 @Microsoft__Quantum__MachineLearning__NMisclassifications__body(%Array* %localPL, %Array* %labels) + %55 = call i64 @__quantum__rt__array_get_size_1d(%Array* %localPL) + %56 = call { i64, i64 }* @Microsoft__Quantum__MachineLearning__ValidationResults__body(i64 %nMisclassifications, i64 %55) + %57 = sub i64 %2, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %58 = phi i64 [ 0, %exit__4 ], [ %69, %exiting__5 ] + %59 = icmp sle i64 %58, %57 + br i1 %59, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %60 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %58) + %61 = bitcast i8* %60 to { { i64, %Array* }*, i2, i64 }** + %62 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %61, align 8 + %63 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %62, i32 0, i32 0 + %64 = load { i64, %Array* }*, { i64, %Array* }** %63, align 8 + %65 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %64, i32 0, i32 1 + %66 = load %Array*, %Array** %65, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 -1) + %67 = bitcast { i64, %Array* }* %64 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %67, i32 -1) + %68 = bitcast { { i64, %Array* }*, i2, i64 }* %62 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %68, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %69 = add i64 %58, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + %70 = sub i64 %19, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %71 = phi i64 [ 0, %exit__5 ], [ %79, %exiting__6 ] + %72 = icmp sle i64 %71, %70 + br i1 %72, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %73 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %71) + %74 = bitcast i8* %73 to { %Array*, i64 }** + %75 = load { %Array*, i64 }*, { %Array*, i64 }** %74, align 8 + %76 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %75, i32 0, i32 0 + %77 = load %Array*, %Array** %76, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %77, i32 -1) + %78 = bitcast { %Array*, i64 }* %75 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %79 = add i64 %71, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 -1) + %80 = sub i64 %32, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %81 = phi i64 [ 0, %exit__6 ], [ %86, %exiting__7 ] + %82 = icmp sle i64 %81, %80 + br i1 %82, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %31, i64 %81) + %84 = bitcast i8* %83 to %Range* + %85 = load %Range, %Range* %84, align 4 + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %86 = add i64 %81, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void 
@__quantum__rt__array_update_alias_count(%Array* %31, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 -1) + %87 = sub i64 %42, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %88 = phi i64 [ 0, %exit__7 ], [ %93, %exiting__8 ] + %89 = icmp sle i64 %88, %87 + br i1 %89, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %features, i64 %88) + %91 = bitcast i8* %90 to %Array** + %92 = load %Array*, %Array** %91, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %92, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %93 = add i64 %88, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %features, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %labels, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %probabilities, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %localPL, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %41, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %41, i32 -1) + %94 = sub i64 %42, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %95 = phi i64 [ 0, %exit__8 ], [ %100, %exiting__9 ] + %96 = icmp sle i64 %95, %94 + br i1 %96, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %features, i64 %95) + %98 = bitcast i8* %97 to %Array** + %99 = load %Array*, %Array** %98, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %99, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %100 = add i64 %95, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_reference_count(%Array* %features, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %50, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %50, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %51, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %labels, i32 -1) + %101 = call i64 @__quantum__rt__array_get_size_1d(%Array* %52) + %102 = sub i64 %101, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %103 = phi i64 [ 0, %exit__9 ], [ %108, %exiting__10 ] + %104 = icmp sle i64 %103, %102 + br i1 %104, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %105 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %52, i64 %103) + %106 = bitcast i8* %105 to %Array** + %107 = load %Array*, %Array** %106, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %107, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %108 = add i64 %103, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_reference_count(%Array* %52, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %probabilities, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %localPL, i32 -1) + ret { i64, i64 }* %56 +} + +define internal double @Microsoft__Quantum__Arrays___64d768d0751b4ad8b5cf130c7bf24274_Fold__body(%Callable* %folder, double %state, %Array* %array) { +entry: + %current = alloca double, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %folder, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %folder, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + store double %state, double* %current, align 8 + %0 = call %Range @Microsoft__Quantum__Arrays___ea8dc357841940139fee623fefb8c332_IndexRange__body(%Array* %array) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %4 = icmp sgt i64 %2, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxElement = phi i64 [ %1, %preheader__1 ], [ %20, %exiting__1 ] + %5 = icmp sle i64 %idxElement, %3 + %6 = icmp sge i64 %idxElement, %3 + %7 = select i1 %4, i1 %5, i1 %6 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = load double, double* %current, align 8 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idxElement) + %10 = bitcast i8* %9 to double* + %11 = load double, double* %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, double }* + %14 = getelementptr inbounds { double, double }, { double, double }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, double }, { double, double }* %13, i32 0, i32 1 + store double %8, double* %14, align 8 + store double %11, double* %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %folder, %Tuple* %12, %Tuple* %16) + %17 = bitcast %Tuple* %16 to { double }* + %18 = getelementptr inbounds { double }, { double }* %17, i32 0, i32 0 + %19 = load double, double* %18, align 8 + store double %19, double* %current, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %idxElement, %2 + br label %header__1 + +exit__1: ; preds = %header__1 + %21 = load double, double* %current, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %folder, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %folder, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret double %21 +} + +define internal void @Microsoft__Quantum__Math__TimesD__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, double }* + %1 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load double, double* %2, align 8 + %5 = call double @Microsoft__Quantum__Math__TimesD__body(double %3, double %4) + %6 = bitcast %Tuple* %result-tuple to { double }* + %7 = getelementptr inbounds { double }, { double }* %6, i32 0, i32 0 + store double %5, double* %7, align 8 + ret void +} + +declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) + +define internal double @Microsoft__Quantum__Math__TimesD__body(double %a, double %b) { +entry: + %0 = fmul double %a, %b + ret double %0 
+} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0___1b18656b59d5446c8d4e6d6de10b907b_EstimateDerivativeWithParameterShift____body({ i64, %Callable* }* %inputEncoder, %Array* %target) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %inputEncoder, i32 0, i32 1 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { i64, %Callable* }* %inputEncoder to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %3 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %target) + %4 = bitcast { %Array* }* %3 to %Tuple* + call void @__quantum__rt__callable_invoke(%Callable* %1, %Tuple* %4, %Tuple* null) + %5 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +define internal { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %__Item1__) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__Item1__, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Array* }* + %2 = getelementptr inbounds { %Array* }, { %Array* }* %1, i32 0, i32 0 + store %Array* %__Item1__, %Array** %2, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %__Item1__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__Item1__, i32 -1) + ret { %Array* }* %1 +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0___1b18656b59d5446c8d4e6d6de10b907b_EstimateDerivativeWithParameterShift____adj({ i64, %Callable* }* %inputEncoder, %Array* %target) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %inputEncoder, i32 0, i32 1 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { i64, %Callable* }* %inputEncoder to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %3 = call %Callable* @__quantum__rt__callable_copy(%Callable* %1, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %3) + %4 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %target) + %5 = bitcast { %Array* }* %4 to %Tuple* + call void @__quantum__rt__callable_invoke(%Callable* %3, %Tuple* %5, %Tuple* null) + %6 = getelementptr inbounds { %Array* }, { %Array* }* %4, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) + +declare void @__quantum__rt__callable_make_adjoint(%Callable*) + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0___1b18656b59d5446c8d4e6d6de10b907b_EstimateDerivativeWithParameterShift____ctl(%Array* %__controlQubits__, { { i64, %Callable* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* %0, i32 0, i32 0 + %inputEncoder = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %inputEncoder, i32 0, i32 1 + %3 = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 1) + %4 = bitcast { i64, %Callable* }* %inputEncoder to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %3, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %7 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %target) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, { %Array* }* }* + %10 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %9, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %10, align 8 + store { %Array* }* %7, { %Array* }** %11, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %8, %Tuple* null) + %12 = getelementptr inbounds { %Array* }, { %Array* }* %7, i32 0, i32 0 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + %14 = bitcast { %Array* }* %7 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +declare void @__quantum__rt__callable_make_controlled(%Callable*) + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0___1b18656b59d5446c8d4e6d6de10b907b_EstimateDerivativeWithParameterShift____ctladj(%Array* %__controlQubits__, { { i64, %Callable* }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* %0, i32 0, i32 0 + %inputEncoder = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %inputEncoder, i32 0, i32 1 + %3 = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 1) + %4 = bitcast { i64, %Callable* }* %inputEncoder to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 1) + %5 = getelementptr inbounds { { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %3, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %7 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %target) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, { %Array* }* }* + %10 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %9, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %10, align 8 + store { %Array* }* %7, { %Array* }** %11, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %8, %Tuple* null) + %12 = getelementptr inbounds { %Array* }, { %Array* }* %7, i32 0, i32 0 + %13 = load %Array*, %Array** %12, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + %14 = bitcast { %Array* }* %7 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal { %Array*, %Array*, double }* @Microsoft__Quantum__MachineLearning____QsRef0___TrainSequentialClassifierAtModel____body({ %Array*, %Array*, double }* %model, %Array* %samples, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, { %Array* }* %schedule) { +entry: + %nStalls = alloca i64, align 8 + %batchSize = alloca i64, align 8 + %lrate = alloca double, align 8 + %current = alloca { %Array*, %Array*, double }*, align 8 + %nBestMisses = alloca i64, align 8 + %bestSoFar = alloca { %Array*, %Array*, double }*, align 8 + %0 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { { i64, %Array* }*, i2, i64 }** + %8 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %7, align 8 + %9 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %8, i32 0, i32 0 + %10 = load { i64, %Array* }*, { i64, %Array* }** %9, align 8 + %11 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %10, i32 0, i32 1 + %12 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = bitcast { i64, %Array* }* %10 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %13, i32 1) + %14 = bitcast { { i64, %Array* }*, i2, i64 }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array*, %Array*, double }* %model to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + %nSamples = call i64 @__quantum__rt__array_get_size_1d(%Array* %samples) + %19 = sub i64 %nSamples, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %28, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %20) + %23 = bitcast i8* %22 to { %Array*, i64 }** + %24 = load { %Array*, i64 }*, { %Array*, i64 }** %23, align 8 + %25 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %24, i32 0, i32 0 + %26 = load %Array*, %Array** %25, 
align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %26, i32 1) + %27 = bitcast { %Array*, i64 }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %28 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 1) + %29 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 8 + %30 = load %Callable*, %Callable** %29, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %30, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %30, i32 1) + %31 = bitcast { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %31, i32 1) + %32 = getelementptr inbounds { %Array* }, { %Array* }* %schedule, i32 0, i32 0 + %33 = load %Array*, %Array** %32, align 8 + %34 = call i64 @__quantum__rt__array_get_size_1d(%Array* %33) + %35 = sub i64 %34, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %36 = phi i64 [ 0, %exit__2 ], [ %41, %exiting__3 ] + %37 = icmp sle i64 %36, %35 + br i1 %37, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %36) + %39 = bitcast i8* %38 to %Range* + %40 = load %Range, %Range* %39, align 4 + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %41 = add i64 %36, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + %42 = bitcast { %Array* }* %schedule to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 1) + %43 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning___Features__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %features = call %Array* @Microsoft__Quantum__Arrays___458019d5b77947a88477997a20fc14c5_Mapped__body(%Callable* %43, %Array* %samples) + %44 = call i64 @__quantum__rt__array_get_size_1d(%Array* %features) + %45 = sub i64 %44, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %46 = phi i64 [ 0, %exit__3 ], [ %51, %exiting__4 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %features, i64 %46) + %49 = bitcast i8* %48 to %Array** + %50 = load %Array*, %Array** %49, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %51 = add i64 %46, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %features, i32 1) + %52 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning___Label__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %actualLabels = call %Array* @Microsoft__Quantum__Arrays___f34491685bf044f1939458f941be92ef_Mapped__body(%Callable* %52, %Array* %samples) + call void @__quantum__rt__array_update_alias_count(%Array* %actualLabels, i32 1) + %53 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* 
}, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 1 + %54 = load double, double* %53, align 8 + %55 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 3 + %56 = load i64, i64* %55, align 4 + %probabilities = call %Array* @Microsoft__Quantum__MachineLearning__EstimateClassificationProbabilities__body(double %54, { %Array*, %Array*, double }* %model, %Array* %features, i64 %56) + call void @__quantum__rt__array_update_alias_count(%Array* %probabilities, i32 1) + %57 = call %Tuple* @__quantum__rt__tuple_copy(%Tuple* %18, i1 false) + %58 = bitcast %Tuple* %57 to { %Array*, %Array*, double }* + %59 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %58, i32 0, i32 2 + %60 = call %Array* @Microsoft__Quantum__Arrays___e71c6b9cbb804917a4d7cd04011f2188_Zipped__body(%Array* %probabilities, %Array* %actualLabels) + %61 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 2 + %62 = load double, double* %61, align 8 + %63 = call double @Microsoft__Quantum__MachineLearning____QsRef0__UpdatedBias____body(%Array* %60, double %62, double %54) + store double %63, double* %59, align 8 + store { %Array*, %Array*, double }* %58, { %Array*, %Array*, double }** %bestSoFar, align 8 + %64 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %58, i32 0, i32 0 + %65 = load %Array*, %Array** %64, align 8 + %66 = call i64 @__quantum__rt__array_get_size_1d(%Array* %65) + %67 = sub i64 %66, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %68 = phi i64 [ 0, %exit__4 ], [ %79, %exiting__5 ] + %69 = icmp sle i64 %68, %67 + br i1 %69, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 %68) + %71 = bitcast i8* %70 to { { i64, %Array* }*, i2, i64 }** + %72 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %71, align 8 + %73 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %72, i32 0, i32 0 + %74 = load { i64, %Array* }*, { i64, %Array* }** %73, align 8 + %75 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %74, i32 0, i32 1 + %76 = load %Array*, %Array** %75, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 1) + %77 = bitcast { i64, %Array* }* %74 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %77, i32 1) + %78 = bitcast { { i64, %Array* }*, i2, i64 }* %72 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %79 = add i64 %68, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 1) + %80 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %58, i32 0, i32 1 + %81 = load %Array*, %Array** %80, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 1) + %inferredLabels = call %Array* @Microsoft__Quantum__MachineLearning__InferredLabels__body(double %63, %Array* %probabilities) + call void @__quantum__rt__array_update_alias_count(%Array* %inferredLabels, i32 1) + %82 = call %Array* 
@Microsoft__Quantum__MachineLearning__Misclassifications__body(%Array* %inferredLabels, %Array* %actualLabels) + %83 = call i64 @__quantum__rt__array_get_size_1d(%Array* %82) + store i64 %83, i64* %nBestMisses, align 4 + store { %Array*, %Array*, double }* %58, { %Array*, %Array*, double }** %current, align 8 + %84 = sub i64 %66, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %85 = phi i64 [ 0, %exit__5 ], [ %96, %exiting__6 ] + %86 = icmp sle i64 %85, %84 + br i1 %86, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %87 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 %85) + %88 = bitcast i8* %87 to { { i64, %Array* }*, i2, i64 }** + %89 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %88, align 8 + %90 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %89, i32 0, i32 0 + %91 = load { i64, %Array* }*, { i64, %Array* }** %90, align 8 + %92 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %91, i32 0, i32 1 + %93 = load %Array*, %Array** %92, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %93, i32 1) + %94 = bitcast { i64, %Array* }* %91 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %94, i32 1) + %95 = bitcast { { i64, %Array* }*, i2, i64 }* %89 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %95, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %96 = add i64 %85, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 1) + %97 = sub i64 %66, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %98 = phi i64 [ 0, %exit__6 ], [ %109, %exiting__7 ] + %99 = icmp sle i64 %98, %97 + br i1 %99, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %100 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 %98) + %101 = bitcast i8* %100 to { { i64, %Array* }*, i2, i64 }** + %102 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %101, align 8 + %103 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %102, i32 0, i32 0 + %104 = load { i64, %Array* }*, { i64, %Array* }** %103, align 8 + %105 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %104, i32 0, i32 1 + %106 = load %Array*, %Array** %105, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %106, i32 1) + %107 = bitcast { i64, %Array* }* %104 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %107, i32 1) + %108 = bitcast { { i64, %Array* }*, i2, i64 }* %102 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %108, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %109 = add i64 %98, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %57, i32 1) + %110 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([28 x i8], [28 x i8]* @1, i32 0, i32 0)) + %111 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %String* }* getelementptr ({ %String* }, { %String* }* 
null, i32 1) to i64)) + %112 = bitcast %Tuple* %111 to { %String* }* + %113 = getelementptr inbounds { %String* }, { %String* }* %112, i32 0, i32 0 + store %String* %110, %String** %113, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %30, %Tuple* %111, %Tuple* null) + %114 = sitofp i64 %2 to double + %effectiveTolerance = fdiv double %54, %114 + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 0) + %116 = bitcast i8* %115 to { %Array*, i64 }** + %117 = load { %Array*, i64 }*, { %Array*, i64 }** %116, align 8 + %118 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %117, i32 0, i32 0 + %119 = load %Array*, %Array** %118, align 8 + %120 = call i64 @Microsoft__Quantum__MachineLearning__FeatureRegisterSize__body(%Array* %119) + %121 = call i64 @Microsoft__Quantum__MachineLearning__NQubitsRequired__body({ %Array*, %Array*, double }* %model) + %nQubits = call i64 @Microsoft__Quantum__Math__MaxI__body(i64 %120, i64 %121) + %122 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning____QsRef0__EncodeSample____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %123 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, i64 }* getelementptr ({ %Callable*, double, i64 }, { %Callable*, double, i64 }* null, i32 1) to i64)) + %124 = bitcast %Tuple* %123 to { %Callable*, double, i64 }* + %125 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %124, i32 0, i32 0 + %126 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %124, i32 0, i32 1 + %127 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %124, i32 0, i32 2 + store %Callable* %122, %Callable** %125, align 8 + store double %effectiveTolerance, double* %126, align 8 + store i64 %nQubits, i64* %127, align 4 + %128 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__2__FunctionTable, %Tuple* %123) + %encodedSamples = call %Array* @Microsoft__Quantum__Arrays___babf2e1b7d9541c0a4b642aa9d5d6bbf_Mapped__body(%Callable* %128, %Array* %samples) + %129 = call i64 @__quantum__rt__array_get_size_1d(%Array* %encodedSamples) + %130 = sub i64 %129, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %131 = phi i64 [ 0, %exit__7 ], [ %147, %exiting__8 ] + %132 = icmp sle i64 %131, %130 + br i1 %132, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %encodedSamples, i64 %131) + %134 = bitcast i8* %133 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %135 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %134, align 8 + %136 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %135, i32 0, i32 0 + %137 = load { %Array*, i64 }*, { %Array*, i64 }** %136, align 8 + %138 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %137, i32 0, i32 0 + %139 = load %Array*, %Array** %138, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %139, i32 1) + %140 = bitcast { %Array*, i64 }* %137 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %140, i32 1) + %141 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, 
i64 }*, { i64, %Callable* }* }* %135, i32 0, i32 1 + %142 = load { i64, %Callable* }*, { i64, %Callable* }** %141, align 8 + %143 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %142, i32 0, i32 1 + %144 = load %Callable*, %Callable** %143, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %144, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %144, i32 1) + %145 = bitcast { i64, %Callable* }* %142 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %145, i32 1) + %146 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %135 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %146, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %147 = add i64 %131, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %encodedSamples, i32 1) + %148 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 0 + %149 = load double, double* %148, align 8 + store double %149, double* %lrate, align 8 + %150 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 2 + %151 = load i64, i64* %150, align 4 + store i64 %151, i64* %batchSize, align 4 + store i64 0, i64* %nStalls, align 4 + %152 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 4 + %153 = load i64, i64* %152, align 4 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %ep = phi i64 [ 1, %exit__8 ], [ %185, %exiting__9 ] + %154 = icmp sle i64 %ep, %153 + br i1 %154, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %155 = load %Callable*, %Callable** %29, align 8 + %156 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([21 x i8], [21 x i8]* @2, i32 0, i32 0)) + %157 = call %String* @__quantum__rt__int_to_string(i64 %ep) + %158 = call %String* @__quantum__rt__string_concatenate(%String* %156, %String* %157) + call void @__quantum__rt__string_update_reference_count(%String* %156, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %157, i32 -1) + %159 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @3, i32 0, i32 0)) + %160 = call %String* @__quantum__rt__string_concatenate(%String* %158, %String* %159) + call void @__quantum__rt__string_update_reference_count(%String* %158, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %159, i32 -1) + %161 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %String* }* getelementptr ({ %String* }, { %String* }* null, i32 1) to i64)) + %162 = bitcast %Tuple* %161 to { %String* }* + %163 = getelementptr inbounds { %String* }, { %String* }* %162, i32 0, i32 0 + store %String* %160, %String** %163, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %155, %Tuple* %161, %Tuple* null) + %164 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 7 + %165 = load i64, i64* %164, align 4 + %166 = call %Tuple* @__quantum__rt__tuple_copy(%Tuple* %31, i1 false) + %167 = bitcast %Tuple* %166 to { 
double, double, i64, i64, i64, i64, double, i64, %Callable* }* + %168 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %167, i32 0, i32 0 + %169 = load double, double* %lrate, align 8 + store double %169, double* %168, align 8 + %170 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %167, i32 0, i32 8 + %171 = load %Callable*, %Callable** %170, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %171, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %171, i32 1) + %172 = call %Tuple* @__quantum__rt__tuple_copy(%Tuple* %166, i1 false) + %173 = bitcast %Tuple* %172 to { double, double, i64, i64, i64, i64, double, i64, %Callable* }* + %174 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %173, i32 0, i32 2 + %175 = load i64, i64* %batchSize, align 4 + store i64 %175, i64* %174, align 4 + %176 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %current, align 8 + %177 = load i64, i64* %nBestMisses, align 4 + %178 = call { i64, { %Array*, %Array*, double }* }* @Microsoft__Quantum__MachineLearning____QsRef0__RunSingleTrainingEpoch____body(%Array* %encodedSamples, { %Array* }* %schedule, i64 %165, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %173, { %Array*, %Array*, double }* %176, i64 %177) + %179 = getelementptr inbounds { i64, { %Array*, %Array*, double }* }, { i64, { %Array*, %Array*, double }* }* %178, i32 0, i32 0 + %nMisses = load i64, i64* %179, align 4 + %180 = getelementptr inbounds { i64, { %Array*, %Array*, double }* }, { i64, { %Array*, %Array*, double }* }* %178, i32 0, i32 1 + %proposedUpdate = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %180, align 8 + %181 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %proposedUpdate, i32 0, i32 0 + %182 = load %Array*, %Array** %181, align 8 + %183 = call i64 @__quantum__rt__array_get_size_1d(%Array* %182) + %184 = sub i64 %183, 1 + br label %header__10 + +exiting__9: ; preds = %exit__51 + %185 = add i64 %ep, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + %186 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %bestSoFar, align 8 + %187 = load %String*, %String** %113, align 8 + %188 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %current, align 8 + %189 = load %Array*, %Array** %64, align 8 + %190 = load %Array*, %Array** %80, align 8 + %191 = call i64 @__quantum__rt__array_get_size_1d(%Array* %189) + %192 = sub i64 %191, 1 + br label %header__52 + +header__10: ; preds = %exiting__10, %body__9 + %193 = phi i64 [ 0, %body__9 ], [ %204, %exiting__10 ] + %194 = icmp sle i64 %193, %184 + br i1 %194, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %195 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %182, i64 %193) + %196 = bitcast i8* %195 to { { i64, %Array* }*, i2, i64 }** + %197 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %196, align 8 + %198 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %197, i32 0, i32 0 + %199 = load { i64, %Array* }*, { i64, %Array* }** %198, align 8 + %200 = getelementptr inbounds { i64, %Array* }, { i64, 
%Array* }* %199, i32 0, i32 1 + %201 = load %Array*, %Array** %200, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %201, i32 1) + %202 = bitcast { i64, %Array* }* %199 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %202, i32 1) + %203 = bitcast { { i64, %Array* }*, i2, i64 }* %197 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %203, i32 1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %204 = add i64 %193, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %182, i32 1) + %205 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %proposedUpdate, i32 0, i32 1 + %206 = load %Array*, %Array** %205, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %206, i32 1) + %207 = bitcast { %Array*, %Array*, double }* %proposedUpdate to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %207, i32 1) + %208 = icmp slt i64 %nMisses, %177 + br i1 %208, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__10 + store i64 %nMisses, i64* %nBestMisses, align 4 + %209 = sub i64 %183, 1 + br label %header__11 + +continue__1: ; preds = %continue__2, %exit__10 + %210 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %176, i32 0, i32 2 + %211 = load double, double* %210, align 8 + %212 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %proposedUpdate, i32 0, i32 2 + %213 = load double, double* %212, align 8 + %214 = call i1 @Microsoft__Quantum__Logical__NearlyEqualD__body(double %211, double %213) + br i1 %214, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %continue__1 + %215 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %176, i32 0, i32 1 + %216 = load %Array*, %Array** %215, align 8 + %217 = call i1 @Microsoft__Quantum__MachineLearning____QsRef0__AllNearlyEqualD____body(%Array* %216, %Array* %206) + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %continue__1 + %218 = phi i1 [ %217, %condTrue__1 ], [ %214, %continue__1 ] + br i1 %218, label %then0__3, label %else__1 + +then0__3: ; preds = %condContinue__1 + %219 = load i64, i64* %nStalls, align 4 + %220 = add i64 %219, 1 + store i64 %220, i64* %nStalls, align 4 + %221 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 5 + %222 = load i64, i64* %221, align 4 + %223 = icmp sgt i64 %220, %222 + br i1 %223, label %then0__4, label %continue__4 + +then0__4: ; preds = %then0__3 + %224 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %bestSoFar, align 8 + %225 = load %String*, %String** %113, align 8 + %226 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %173, i32 0, i32 8 + %227 = load %Callable*, %Callable** %226, align 8 + %228 = load %Array*, %Array** %64, align 8 + %229 = load %Array*, %Array** %80, align 8 + %230 = call i64 @__quantum__rt__array_get_size_1d(%Array* %228) + %231 = sub i64 %230, 1 + br label %header__29 + +continue__4: ; preds = %then0__3 + store i64 %220, i64* %batchSize, align 4 + %232 = load double, double* %lrate, align 8 + %233 = fmul double %232, 1.250000e+00 + store double %233, double* %lrate, align 8 + %234 = 
getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 5 + %235 = load i64, i64* %234, align 4 + %236 = sdiv i64 %235, 2 + %237 = icmp sgt i64 %220, %236 + br i1 %237, label %then0__5, label %continue__5 + +then0__5: ; preds = %continue__4 + %238 = load %Array*, %Array** %0, align 8 + %239 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning____QsRef0__RandomlyRescale____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %240 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 6 + %241 = load double, double* %240, align 8 + %242 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %243 = bitcast %Tuple* %242 to { %Callable*, double }* + %244 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %243, i32 0, i32 0 + %245 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %243, i32 0, i32 1 + store %Callable* %239, %Callable** %244, align 8 + store double %241, double* %245, align 8 + %246 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__3__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__3__FunctionTable, %Tuple* %242) + %247 = call %Array* @Microsoft__Quantum__Arrays___6bc4a411bac74d8081320ac7e36319e3_ForEach__body(%Callable* %246, %Array* %206) + %248 = call double @Microsoft__Quantum__MachineLearning____QsRef0__RandomlyRescale____body(double %241, double %213) + %249 = call { %Array*, %Array*, double }* @Microsoft__Quantum__MachineLearning__SequentialModel__body(%Array* %238, %Array* %247, double %248) + %250 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %249, i32 0, i32 0 + %251 = load %Array*, %Array** %250, align 8 + %252 = call i64 @__quantum__rt__array_get_size_1d(%Array* %251) + %253 = sub i64 %252, 1 + br label %header__43 + +continue__5: ; preds = %exit__45, %continue__4 + br label %continue__3 + +else__1: ; preds = %condContinue__1 + store i64 0, i64* %nStalls, align 4 + %254 = load double, double* %148, align 8 + store double %254, double* %lrate, align 8 + %255 = load i64, i64* %150, align 4 + store i64 %255, i64* %batchSize, align 4 + %256 = sub i64 %183, 1 + br label %header__46 + +continue__3: ; preds = %exit__49, %continue__5 + %257 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %173, i32 0, i32 8 + %258 = load %Callable*, %Callable** %257, align 8 + %259 = sub i64 %183, 1 + br label %header__50 + +header__11: ; preds = %exiting__11, %then0__1 + %260 = phi i64 [ 0, %then0__1 ], [ %271, %exiting__11 ] + %261 = icmp sle i64 %260, %209 + br i1 %261, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %262 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %182, i64 %260) + %263 = bitcast i8* %262 to { { i64, %Array* }*, i2, i64 }** + %264 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %263, align 8 + %265 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %264, i32 0, i32 0 + %266 = load 
{ i64, %Array* }*, { i64, %Array* }** %265, align 8 + %267 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %266, i32 0, i32 1 + %268 = load %Array*, %Array** %267, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %268, i32 1) + %269 = bitcast { i64, %Array* }* %266 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %269, i32 1) + %270 = bitcast { { i64, %Array* }*, i2, i64 }* %264 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %270, i32 1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %271 = add i64 %260, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %182, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %206, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %207, i32 1) + %272 = sub i64 %183, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %273 = phi i64 [ 0, %exit__11 ], [ %284, %exiting__12 ] + %274 = icmp sle i64 %273, %272 + br i1 %274, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %275 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %182, i64 %273) + %276 = bitcast i8* %275 to { { i64, %Array* }*, i2, i64 }** + %277 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %276, align 8 + %278 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %277, i32 0, i32 0 + %279 = load { i64, %Array* }*, { i64, %Array* }** %278, align 8 + %280 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %279, i32 0, i32 1 + %281 = load %Array*, %Array** %280, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %281, i32 1) + %282 = bitcast { i64, %Array* }* %279 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %282, i32 1) + %283 = bitcast { { i64, %Array* }*, i2, i64 }* %277 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %283, i32 1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %284 = add i64 %273, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_reference_count(%Array* %182, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %206, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %207, i32 1) + %285 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %bestSoFar, align 8 + %286 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %285, i32 0, i32 0 + %287 = load %Array*, %Array** %286, align 8 + %288 = call i64 @__quantum__rt__array_get_size_1d(%Array* %287) + %289 = sub i64 %288, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %290 = phi i64 [ 0, %exit__12 ], [ %301, %exiting__13 ] + %291 = icmp sle i64 %290, %289 + br i1 %291, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %292 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %287, i64 %290) + %293 = bitcast i8* %292 to { { i64, %Array* }*, i2, i64 }** + %294 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %293, align 8 + %295 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %294, i32 0, i32 0 + %296 = load { i64, %Array* }*, { i64, %Array* }** %295, align 8 + %297 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %296, i32 0, i32 1 + %298 = load 
%Array*, %Array** %297, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %298, i32 -1) + %299 = bitcast { i64, %Array* }* %296 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %299, i32 -1) + %300 = bitcast { { i64, %Array* }*, i2, i64 }* %294 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %300, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %301 = add i64 %290, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_alias_count(%Array* %287, i32 -1) + %302 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %285, i32 0, i32 1 + %303 = load %Array*, %Array** %302, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %303, i32 -1) + %304 = bitcast { %Array*, %Array*, double }* %285 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %304, i32 -1) + %305 = sub i64 %288, 1 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %306 = phi i64 [ 0, %exit__13 ], [ %317, %exiting__14 ] + %307 = icmp sle i64 %306, %305 + br i1 %307, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %308 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %287, i64 %306) + %309 = bitcast i8* %308 to { { i64, %Array* }*, i2, i64 }** + %310 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %309, align 8 + %311 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %310, i32 0, i32 0 + %312 = load { i64, %Array* }*, { i64, %Array* }** %311, align 8 + %313 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %312, i32 0, i32 1 + %314 = load %Array*, %Array** %313, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %314, i32 -1) + %315 = bitcast { i64, %Array* }* %312 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %315, i32 -1) + %316 = bitcast { { i64, %Array* }*, i2, i64 }* %310 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %316, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %317 = add i64 %306, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_reference_count(%Array* %287, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %303, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %304, i32 -1) + store { %Array*, %Array*, double }* %proposedUpdate, { %Array*, %Array*, double }** %bestSoFar, align 8 + %318 = sitofp i64 %nMisses to double + %319 = sitofp i64 %nSamples to double + %320 = fdiv double %318, %319 + %321 = load double, double* %53, align 8 + %322 = fcmp olt double %320, %321 + br i1 %322, label %then0__2, label %continue__2 + +then0__2: ; preds = %exit__14 + %323 = load %String*, %String** %113, align 8 + %324 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %173, i32 0, i32 8 + %325 = load %Callable*, %Callable** %324, align 8 + %326 = load %Array*, %Array** %64, align 8 + %327 = load %Array*, %Array** %80, align 8 + %328 = call i64 @__quantum__rt__array_get_size_1d(%Array* %326) + %329 = sub i64 %328, 1 + br label %header__15 + +continue__2: ; preds = %exit__14 + store i64 0, i64* %nStalls, align 4 + %330 = load double, double* %148, align 8 + store double %330, double* %lrate, align 8 + %331 = 
load i64, i64* %150, align 4 + store i64 %331, i64* %batchSize, align 4 + br label %continue__1 + +header__15: ; preds = %exiting__15, %then0__2 + %332 = phi i64 [ 0, %then0__2 ], [ %343, %exiting__15 ] + %333 = icmp sle i64 %332, %329 + br i1 %333, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %334 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %326, i64 %332) + %335 = bitcast i8* %334 to { { i64, %Array* }*, i2, i64 }** + %336 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %335, align 8 + %337 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %336, i32 0, i32 0 + %338 = load { i64, %Array* }*, { i64, %Array* }** %337, align 8 + %339 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %338, i32 0, i32 1 + %340 = load %Array*, %Array** %339, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %340, i32 1) + %341 = bitcast { i64, %Array* }* %338 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %341, i32 1) + %342 = bitcast { { i64, %Array* }*, i2, i64 }* %336 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %342, i32 1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %343 = add i64 %332, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_reference_count(%Array* %326, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %327, i32 1) + %344 = sub i64 %183, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %345 = phi i64 [ 0, %exit__15 ], [ %356, %exiting__16 ] + %346 = icmp sle i64 %345, %344 + br i1 %346, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %347 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %182, i64 %345) + %348 = bitcast i8* %347 to { { i64, %Array* }*, i2, i64 }** + %349 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %348, align 8 + %350 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %349, i32 0, i32 0 + %351 = load { i64, %Array* }*, { i64, %Array* }** %350, align 8 + %352 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %351, i32 0, i32 1 + %353 = load %Array*, %Array** %352, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %353, i32 -1) + %354 = bitcast { i64, %Array* }* %351 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %354, i32 -1) + %355 = bitcast { { i64, %Array* }*, i2, i64 }* %349 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %355, i32 -1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %356 = add i64 %345, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_alias_count(%Array* %182, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %206, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %207, i32 -1) + %357 = load %Array*, %Array** %0, align 8 + %358 = call i64 @__quantum__rt__array_get_size_1d(%Array* %357) + %359 = sub i64 %358, 1 + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %360 = phi i64 [ 0, %exit__16 ], [ %371, %exiting__17 ] + %361 = icmp sle i64 %360, %359 + br i1 %361, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %362 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %357, i64 %360) + %363 = bitcast i8* %362 to { { i64, 
%Array* }*, i2, i64 }** + %364 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %363, align 8 + %365 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %364, i32 0, i32 0 + %366 = load { i64, %Array* }*, { i64, %Array* }** %365, align 8 + %367 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %366, i32 0, i32 1 + %368 = load %Array*, %Array** %367, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %368, i32 -1) + %369 = bitcast { i64, %Array* }* %366 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %369, i32 -1) + %370 = bitcast { { i64, %Array* }*, i2, i64 }* %364 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %370, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %371 = add i64 %360, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %357, i32 -1) + %372 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %372, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + %373 = sub i64 %nSamples, 1 + br label %header__18 + +header__18: ; preds = %exiting__18, %exit__17 + %374 = phi i64 [ 0, %exit__17 ], [ %382, %exiting__18 ] + %375 = icmp sle i64 %374, %373 + br i1 %375, label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %376 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %374) + %377 = bitcast i8* %376 to { %Array*, i64 }** + %378 = load { %Array*, i64 }*, { %Array*, i64 }** %377, align 8 + %379 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %378, i32 0, i32 0 + %380 = load %Array*, %Array** %379, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %380, i32 -1) + %381 = bitcast { %Array*, i64 }* %378 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %381, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = %body__18 + %382 = add i64 %374, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %155, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %155, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %31, i32 -1) + %383 = load %Array*, %Array** %32, align 8 + %384 = call i64 @__quantum__rt__array_get_size_1d(%Array* %383) + %385 = sub i64 %384, 1 + br label %header__19 + +header__19: ; preds = %exiting__19, %exit__18 + %386 = phi i64 [ 0, %exit__18 ], [ %391, %exiting__19 ] + %387 = icmp sle i64 %386, %385 + br i1 %387, label %body__19, label %exit__19 + +body__19: ; preds = %header__19 + %388 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %383, i64 %386) + %389 = bitcast i8* %388 to %Range* + %390 = load %Range, %Range* %389, align 4 + br label %exiting__19 + +exiting__19: ; preds = %body__19 + %391 = add i64 %386, 1 + br label %header__19 + +exit__19: ; preds = %header__19 + call void @__quantum__rt__array_update_alias_count(%Array* %383, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 -1) + %392 = sub i64 %44, 1 + br label %header__20 + +header__20: ; preds = %exiting__20, %exit__19 + %393 = phi i64 [ 0, %exit__19 ], [ %398, %exiting__20 ] + %394 = icmp sle i64 %393, %392 + br i1 %394, label %body__20, label %exit__20 + +body__20: ; preds = 
%header__20 + %395 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %features, i64 %393) + %396 = bitcast i8* %395 to %Array** + %397 = load %Array*, %Array** %396, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %397, i32 -1) + br label %exiting__20 + +exiting__20: ; preds = %body__20 + %398 = add i64 %393, 1 + br label %header__20 + +exit__20: ; preds = %header__20 + call void @__quantum__rt__array_update_alias_count(%Array* %features, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %actualLabels, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %probabilities, i32 -1) + %399 = sub i64 %183, 1 + br label %header__21 + +header__21: ; preds = %exiting__21, %exit__20 + %400 = phi i64 [ 0, %exit__20 ], [ %411, %exiting__21 ] + %401 = icmp sle i64 %400, %399 + br i1 %401, label %body__21, label %exit__21 + +body__21: ; preds = %header__21 + %402 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %182, i64 %400) + %403 = bitcast i8* %402 to { { i64, %Array* }*, i2, i64 }** + %404 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %403, align 8 + %405 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %404, i32 0, i32 0 + %406 = load { i64, %Array* }*, { i64, %Array* }** %405, align 8 + %407 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %406, i32 0, i32 1 + %408 = load %Array*, %Array** %407, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %408, i32 -1) + %409 = bitcast { i64, %Array* }* %406 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %409, i32 -1) + %410 = bitcast { { i64, %Array* }*, i2, i64 }* %404 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %410, i32 -1) + br label %exiting__21 + +exiting__21: ; preds = %body__21 + %411 = add i64 %400, 1 + br label %header__21 + +exit__21: ; preds = %header__21 + call void @__quantum__rt__array_update_alias_count(%Array* %182, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %206, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %207, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %inferredLabels, i32 -1) + %412 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %176, i32 0, i32 0 + %413 = load %Array*, %Array** %412, align 8 + %414 = call i64 @__quantum__rt__array_get_size_1d(%Array* %413) + %415 = sub i64 %414, 1 + br label %header__22 + +header__22: ; preds = %exiting__22, %exit__21 + %416 = phi i64 [ 0, %exit__21 ], [ %427, %exiting__22 ] + %417 = icmp sle i64 %416, %415 + br i1 %417, label %body__22, label %exit__22 + +body__22: ; preds = %header__22 + %418 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %413, i64 %416) + %419 = bitcast i8* %418 to { { i64, %Array* }*, i2, i64 }** + %420 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %419, align 8 + %421 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %420, i32 0, i32 0 + %422 = load { i64, %Array* }*, { i64, %Array* }** %421, align 8 + %423 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %422, i32 0, i32 1 + %424 = load %Array*, %Array** %423, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %424, i32 -1) + %425 = bitcast { i64, %Array* }* %422 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %425, i32 -1) + %426 = bitcast { { i64, %Array* }*, 
i2, i64 }* %420 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %426, i32 -1) + br label %exiting__22 + +exiting__22: ; preds = %body__22 + %427 = add i64 %416, 1 + br label %header__22 + +exit__22: ; preds = %header__22 + call void @__quantum__rt__array_update_alias_count(%Array* %413, i32 -1) + %428 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %176, i32 0, i32 1 + %429 = load %Array*, %Array** %428, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %429, i32 -1) + %430 = bitcast { %Array*, %Array*, double }* %176 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %430, i32 -1) + %431 = sub i64 %129, 1 + br label %header__23 + +header__23: ; preds = %exiting__23, %exit__22 + %432 = phi i64 [ 0, %exit__22 ], [ %448, %exiting__23 ] + %433 = icmp sle i64 %432, %431 + br i1 %433, label %body__23, label %exit__23 + +body__23: ; preds = %header__23 + %434 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %encodedSamples, i64 %432) + %435 = bitcast i8* %434 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %436 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %435, align 8 + %437 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %436, i32 0, i32 0 + %438 = load { %Array*, i64 }*, { %Array*, i64 }** %437, align 8 + %439 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %438, i32 0, i32 0 + %440 = load %Array*, %Array** %439, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %440, i32 -1) + %441 = bitcast { %Array*, i64 }* %438 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %441, i32 -1) + %442 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %436, i32 0, i32 1 + %443 = load { i64, %Callable* }*, { i64, %Callable* }** %442, align 8 + %444 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %443, i32 0, i32 1 + %445 = load %Callable*, %Callable** %444, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %445, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %445, i32 -1) + %446 = bitcast { i64, %Callable* }* %443 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %446, i32 -1) + %447 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %436 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %447, i32 -1) + br label %exiting__23 + +exiting__23: ; preds = %body__23 + %448 = add i64 %432, 1 + br label %header__23 + +exit__23: ; preds = %header__23 + call void @__quantum__rt__array_update_alias_count(%Array* %encodedSamples, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %43, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %43, i32 -1) + %449 = sub i64 %44, 1 + br label %header__24 + +header__24: ; preds = %exiting__24, %exit__23 + %450 = phi i64 [ 0, %exit__23 ], [ %455, %exiting__24 ] + %451 = icmp sle i64 %450, %449 + br i1 %451, label %body__24, label %exit__24 + +body__24: ; preds = %header__24 + %452 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %features, i64 %450) + %453 = bitcast i8* %452 to %Array** + %454 = load %Array*, %Array** %453, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %454, i32 -1) + br label %exiting__24 + +exiting__24: ; preds 
= %body__24 + %455 = add i64 %450, 1 + br label %header__24 + +exit__24: ; preds = %header__24 + call void @__quantum__rt__array_update_reference_count(%Array* %features, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %52, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %52, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %actualLabels, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %probabilities, i32 -1) + %456 = call i64 @__quantum__rt__array_get_size_1d(%Array* %60) + %457 = sub i64 %456, 1 + br label %header__25 + +header__25: ; preds = %exiting__25, %exit__24 + %458 = phi i64 [ 0, %exit__24 ], [ %464, %exiting__25 ] + %459 = icmp sle i64 %458, %457 + br i1 %459, label %body__25, label %exit__25 + +body__25: ; preds = %header__25 + %460 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 %458) + %461 = bitcast i8* %460 to { double, i64 }** + %462 = load { double, i64 }*, { double, i64 }** %461, align 8 + %463 = bitcast { double, i64 }* %462 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %463, i32 -1) + br label %exiting__25 + +exiting__25: ; preds = %body__25 + %464 = add i64 %458, 1 + br label %header__25 + +exit__25: ; preds = %header__25 + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %inferredLabels, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %82, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %323, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %111, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %128, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %128, i32 -1) + %465 = sub i64 %129, 1 + br label %header__26 + +header__26: ; preds = %exiting__26, %exit__25 + %466 = phi i64 [ 0, %exit__25 ], [ %482, %exiting__26 ] + %467 = icmp sle i64 %466, %465 + br i1 %467, label %body__26, label %exit__26 + +body__26: ; preds = %header__26 + %468 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %encodedSamples, i64 %466) + %469 = bitcast i8* %468 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %470 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %469, align 8 + %471 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %470, i32 0, i32 0 + %472 = load { %Array*, i64 }*, { %Array*, i64 }** %471, align 8 + %473 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %472, i32 0, i32 0 + %474 = load %Array*, %Array** %473, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %474, i32 -1) + %475 = bitcast { %Array*, i64 }* %472 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %475, i32 -1) + %476 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %470, i32 0, i32 1 + %477 = load { i64, %Callable* }*, { i64, %Callable* }** %476, align 8 + %478 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %477, i32 0, i32 1 + %479 = load %Callable*, %Callable** %478, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %479, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %479, i32 -1) + %480 = bitcast { 
i64, %Callable* }* %477 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %480, i32 -1) + %481 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %470 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %481, i32 -1) + br label %exiting__26 + +exiting__26: ; preds = %body__26 + %482 = add i64 %466, 1 + br label %header__26 + +exit__26: ; preds = %header__26 + call void @__quantum__rt__array_update_reference_count(%Array* %encodedSamples, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %160, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %161, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %166, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %325, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %325, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %172, i32 -1) + %483 = bitcast { i64, { %Array*, %Array*, double }* }* %178 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %483, i32 -1) + %484 = sub i64 %183, 1 + br label %header__27 + +header__27: ; preds = %exiting__27, %exit__26 + %485 = phi i64 [ 0, %exit__26 ], [ %496, %exiting__27 ] + %486 = icmp sle i64 %485, %484 + br i1 %486, label %body__27, label %exit__27 + +body__27: ; preds = %header__27 + %487 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %182, i64 %485) + %488 = bitcast i8* %487 to { { i64, %Array* }*, i2, i64 }** + %489 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %488, align 8 + %490 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %489, i32 0, i32 0 + %491 = load { i64, %Array* }*, { i64, %Array* }** %490, align 8 + %492 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %491, i32 0, i32 1 + %493 = load %Array*, %Array** %492, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %493, i32 -1) + %494 = bitcast { i64, %Array* }* %491 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %494, i32 -1) + %495 = bitcast { { i64, %Array* }*, i2, i64 }* %489 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %495, i32 -1) + br label %exiting__27 + +exiting__27: ; preds = %body__27 + %496 = add i64 %485, 1 + br label %header__27 + +exit__27: ; preds = %header__27 + call void @__quantum__rt__array_update_reference_count(%Array* %182, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %206, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %207, i32 -1) + %497 = sub i64 %414, 1 + br label %header__28 + +header__28: ; preds = %exiting__28, %exit__27 + %498 = phi i64 [ 0, %exit__27 ], [ %509, %exiting__28 ] + %499 = icmp sle i64 %498, %497 + br i1 %499, label %body__28, label %exit__28 + +body__28: ; preds = %header__28 + %500 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %413, i64 %498) + %501 = bitcast i8* %500 to { { i64, %Array* }*, i2, i64 }** + %502 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %501, align 8 + %503 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %502, i32 0, i32 0 + %504 = load { i64, %Array* }*, { i64, %Array* }** %503, align 8 + %505 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %504, i32 0, i32 1 + %506 = load %Array*, %Array** %505, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %506, i32 -1) + %507 = bitcast { i64, %Array* }* %504 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %507, i32 -1) + %508 = bitcast { { i64, %Array* }*, i2, i64 }* %502 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %508, i32 -1) + br label %exiting__28 + +exiting__28: ; preds = %body__28 + %509 = add i64 %498, 1 + br label %header__28 + +exit__28: ; preds = %header__28 + call void @__quantum__rt__array_update_reference_count(%Array* %413, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %429, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %430, i32 -1) + ret { %Array*, %Array*, double }* %proposedUpdate + +header__29: ; preds = %exiting__29, %then0__4 + %510 = phi i64 [ 0, %then0__4 ], [ %521, %exiting__29 ] + %511 = icmp sle i64 %510, %231 + br i1 %511, label %body__29, label %exit__29 + +body__29: ; preds = %header__29 + %512 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %228, i64 %510) + %513 = bitcast i8* %512 to { { i64, %Array* }*, i2, i64 }** + %514 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %513, align 8 + %515 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %514, i32 0, i32 0 + %516 = load { i64, %Array* }*, { i64, %Array* }** %515, align 8 + %517 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %516, i32 0, i32 1 + %518 = load %Array*, %Array** %517, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %518, i32 1) + %519 = bitcast { i64, %Array* }* %516 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %519, i32 1) + %520 = bitcast { { i64, %Array* }*, i2, i64 }* %514 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %520, i32 1) + br label %exiting__29 + +exiting__29: ; preds = %body__29 + %521 = add i64 %510, 1 + br label %header__29 + +exit__29: ; preds = %header__29 + call void @__quantum__rt__array_update_reference_count(%Array* %228, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %229, i32 1) + %522 = sub i64 %183, 1 + br label %header__30 + +header__30: ; preds = %exiting__30, %exit__29 + %523 = phi i64 [ 0, %exit__29 ], [ %534, %exiting__30 ] + %524 = icmp sle i64 %523, %522 + br i1 %524, label %body__30, label %exit__30 + +body__30: ; preds = %header__30 + %525 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %182, i64 %523) + %526 = bitcast i8* %525 to { { i64, %Array* }*, i2, i64 }** + %527 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %526, align 8 + %528 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %527, i32 0, i32 0 + %529 = load { i64, %Array* }*, { i64, %Array* }** %528, align 8 + %530 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %529, i32 0, i32 1 + %531 = load %Array*, %Array** %530, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %531, i32 -1) + %532 = bitcast { i64, %Array* }* %529 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %532, i32 -1) + %533 = bitcast { { i64, %Array* }*, i2, i64 }* %527 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %533, i32 -1) + br label %exiting__30 + +exiting__30: ; preds = %body__30 + %534 = add i64 %523, 1 + br label %header__30 + +exit__30: ; preds = %header__30 + call void 
@__quantum__rt__array_update_alias_count(%Array* %182, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %206, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %207, i32 -1) + %535 = load %Array*, %Array** %0, align 8 + %536 = call i64 @__quantum__rt__array_get_size_1d(%Array* %535) + %537 = sub i64 %536, 1 + br label %header__31 + +header__31: ; preds = %exiting__31, %exit__30 + %538 = phi i64 [ 0, %exit__30 ], [ %549, %exiting__31 ] + %539 = icmp sle i64 %538, %537 + br i1 %539, label %body__31, label %exit__31 + +body__31: ; preds = %header__31 + %540 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %535, i64 %538) + %541 = bitcast i8* %540 to { { i64, %Array* }*, i2, i64 }** + %542 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %541, align 8 + %543 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %542, i32 0, i32 0 + %544 = load { i64, %Array* }*, { i64, %Array* }** %543, align 8 + %545 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %544, i32 0, i32 1 + %546 = load %Array*, %Array** %545, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %546, i32 -1) + %547 = bitcast { i64, %Array* }* %544 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %547, i32 -1) + %548 = bitcast { { i64, %Array* }*, i2, i64 }* %542 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %548, i32 -1) + br label %exiting__31 + +exiting__31: ; preds = %body__31 + %549 = add i64 %538, 1 + br label %header__31 + +exit__31: ; preds = %header__31 + call void @__quantum__rt__array_update_alias_count(%Array* %535, i32 -1) + %550 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %550, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + %551 = sub i64 %nSamples, 1 + br label %header__32 + +header__32: ; preds = %exiting__32, %exit__31 + %552 = phi i64 [ 0, %exit__31 ], [ %560, %exiting__32 ] + %553 = icmp sle i64 %552, %551 + br i1 %553, label %body__32, label %exit__32 + +body__32: ; preds = %header__32 + %554 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %552) + %555 = bitcast i8* %554 to { %Array*, i64 }** + %556 = load { %Array*, i64 }*, { %Array*, i64 }** %555, align 8 + %557 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %556, i32 0, i32 0 + %558 = load %Array*, %Array** %557, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %558, i32 -1) + %559 = bitcast { %Array*, i64 }* %556 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %559, i32 -1) + br label %exiting__32 + +exiting__32: ; preds = %body__32 + %560 = add i64 %552, 1 + br label %header__32 + +exit__32: ; preds = %header__32 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %155, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %155, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %31, i32 -1) + %561 = load %Array*, %Array** %32, align 8 + %562 = call i64 @__quantum__rt__array_get_size_1d(%Array* %561) + %563 = sub i64 %562, 1 + br label %header__33 + +header__33: ; preds = %exiting__33, %exit__32 + %564 = phi i64 [ 0, %exit__32 ], [ %569, %exiting__33 ] + %565 = icmp sle i64 %564, %563 + br i1 %565, label %body__33, label %exit__33 + +body__33: ; preds = %header__33 + %566 
= call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %561, i64 %564) + %567 = bitcast i8* %566 to %Range* + %568 = load %Range, %Range* %567, align 4 + br label %exiting__33 + +exiting__33: ; preds = %body__33 + %569 = add i64 %564, 1 + br label %header__33 + +exit__33: ; preds = %header__33 + call void @__quantum__rt__array_update_alias_count(%Array* %561, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 -1) + %570 = sub i64 %44, 1 + br label %header__34 + +header__34: ; preds = %exiting__34, %exit__33 + %571 = phi i64 [ 0, %exit__33 ], [ %576, %exiting__34 ] + %572 = icmp sle i64 %571, %570 + br i1 %572, label %body__34, label %exit__34 + +body__34: ; preds = %header__34 + %573 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %features, i64 %571) + %574 = bitcast i8* %573 to %Array** + %575 = load %Array*, %Array** %574, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %575, i32 -1) + br label %exiting__34 + +exiting__34: ; preds = %body__34 + %576 = add i64 %571, 1 + br label %header__34 + +exit__34: ; preds = %header__34 + call void @__quantum__rt__array_update_alias_count(%Array* %features, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %actualLabels, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %probabilities, i32 -1) + %577 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %224, i32 0, i32 0 + %578 = load %Array*, %Array** %577, align 8 + %579 = call i64 @__quantum__rt__array_get_size_1d(%Array* %578) + %580 = sub i64 %579, 1 + br label %header__35 + +header__35: ; preds = %exiting__35, %exit__34 + %581 = phi i64 [ 0, %exit__34 ], [ %592, %exiting__35 ] + %582 = icmp sle i64 %581, %580 + br i1 %582, label %body__35, label %exit__35 + +body__35: ; preds = %header__35 + %583 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %578, i64 %581) + %584 = bitcast i8* %583 to { { i64, %Array* }*, i2, i64 }** + %585 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %584, align 8 + %586 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %585, i32 0, i32 0 + %587 = load { i64, %Array* }*, { i64, %Array* }** %586, align 8 + %588 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %587, i32 0, i32 1 + %589 = load %Array*, %Array** %588, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %589, i32 -1) + %590 = bitcast { i64, %Array* }* %587 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %590, i32 -1) + %591 = bitcast { { i64, %Array* }*, i2, i64 }* %585 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %591, i32 -1) + br label %exiting__35 + +exiting__35: ; preds = %body__35 + %592 = add i64 %581, 1 + br label %header__35 + +exit__35: ; preds = %header__35 + call void @__quantum__rt__array_update_alias_count(%Array* %578, i32 -1) + %593 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %224, i32 0, i32 1 + %594 = load %Array*, %Array** %593, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %594, i32 -1) + %595 = bitcast { %Array*, %Array*, double }* %224 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %595, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %inferredLabels, i32 -1) + %596 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %176, i32 0, i32 0 + %597 = load %Array*, 
%Array** %596, align 8 + %598 = call i64 @__quantum__rt__array_get_size_1d(%Array* %597) + %599 = sub i64 %598, 1 + br label %header__36 + +header__36: ; preds = %exiting__36, %exit__35 + %600 = phi i64 [ 0, %exit__35 ], [ %611, %exiting__36 ] + %601 = icmp sle i64 %600, %599 + br i1 %601, label %body__36, label %exit__36 + +body__36: ; preds = %header__36 + %602 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %597, i64 %600) + %603 = bitcast i8* %602 to { { i64, %Array* }*, i2, i64 }** + %604 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %603, align 8 + %605 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %604, i32 0, i32 0 + %606 = load { i64, %Array* }*, { i64, %Array* }** %605, align 8 + %607 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %606, i32 0, i32 1 + %608 = load %Array*, %Array** %607, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %608, i32 -1) + %609 = bitcast { i64, %Array* }* %606 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %609, i32 -1) + %610 = bitcast { { i64, %Array* }*, i2, i64 }* %604 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %610, i32 -1) + br label %exiting__36 + +exiting__36: ; preds = %body__36 + %611 = add i64 %600, 1 + br label %header__36 + +exit__36: ; preds = %header__36 + call void @__quantum__rt__array_update_alias_count(%Array* %597, i32 -1) + %612 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %176, i32 0, i32 1 + %613 = load %Array*, %Array** %612, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %613, i32 -1) + %614 = bitcast { %Array*, %Array*, double }* %176 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %614, i32 -1) + %615 = sub i64 %129, 1 + br label %header__37 + +header__37: ; preds = %exiting__37, %exit__36 + %616 = phi i64 [ 0, %exit__36 ], [ %632, %exiting__37 ] + %617 = icmp sle i64 %616, %615 + br i1 %617, label %body__37, label %exit__37 + +body__37: ; preds = %header__37 + %618 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %encodedSamples, i64 %616) + %619 = bitcast i8* %618 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %620 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %619, align 8 + %621 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %620, i32 0, i32 0 + %622 = load { %Array*, i64 }*, { %Array*, i64 }** %621, align 8 + %623 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %622, i32 0, i32 0 + %624 = load %Array*, %Array** %623, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %624, i32 -1) + %625 = bitcast { %Array*, i64 }* %622 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %625, i32 -1) + %626 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %620, i32 0, i32 1 + %627 = load { i64, %Callable* }*, { i64, %Callable* }** %626, align 8 + %628 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %627, i32 0, i32 1 + %629 = load %Callable*, %Callable** %628, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %629, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %629, i32 -1) + %630 = bitcast { i64, %Callable* }* %627 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %630, i32 -1) + %631 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %620 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %631, i32 -1) + br label %exiting__37 + +exiting__37: ; preds = %body__37 + %632 = add i64 %616, 1 + br label %header__37 + +exit__37: ; preds = %header__37 + call void @__quantum__rt__array_update_alias_count(%Array* %encodedSamples, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %43, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %43, i32 -1) + %633 = sub i64 %44, 1 + br label %header__38 + +header__38: ; preds = %exiting__38, %exit__37 + %634 = phi i64 [ 0, %exit__37 ], [ %639, %exiting__38 ] + %635 = icmp sle i64 %634, %633 + br i1 %635, label %body__38, label %exit__38 + +body__38: ; preds = %header__38 + %636 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %features, i64 %634) + %637 = bitcast i8* %636 to %Array** + %638 = load %Array*, %Array** %637, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %638, i32 -1) + br label %exiting__38 + +exiting__38: ; preds = %body__38 + %639 = add i64 %634, 1 + br label %header__38 + +exit__38: ; preds = %header__38 + call void @__quantum__rt__array_update_reference_count(%Array* %features, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %52, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %52, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %actualLabels, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %probabilities, i32 -1) + %640 = call i64 @__quantum__rt__array_get_size_1d(%Array* %60) + %641 = sub i64 %640, 1 + br label %header__39 + +header__39: ; preds = %exiting__39, %exit__38 + %642 = phi i64 [ 0, %exit__38 ], [ %648, %exiting__39 ] + %643 = icmp sle i64 %642, %641 + br i1 %643, label %body__39, label %exit__39 + +body__39: ; preds = %header__39 + %644 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 %642) + %645 = bitcast i8* %644 to { double, i64 }** + %646 = load { double, i64 }*, { double, i64 }** %645, align 8 + %647 = bitcast { double, i64 }* %646 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %647, i32 -1) + br label %exiting__39 + +exiting__39: ; preds = %body__39 + %648 = add i64 %642, 1 + br label %header__39 + +exit__39: ; preds = %header__39 + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %inferredLabels, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %82, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %225, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %111, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %128, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %128, i32 -1) + %649 = sub i64 %129, 1 + br label %header__40 + +header__40: ; preds = %exiting__40, %exit__39 + %650 = phi i64 [ 0, %exit__39 ], [ %666, %exiting__40 ] + %651 = icmp sle i64 %650, %649 + br i1 %651, label %body__40, label %exit__40 + +body__40: ; preds = %header__40 + %652 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %encodedSamples, i64 %650) + %653 = bitcast i8* %652 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %654 = load { { %Array*, i64 }*, { 
i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %653, align 8 + %655 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %654, i32 0, i32 0 + %656 = load { %Array*, i64 }*, { %Array*, i64 }** %655, align 8 + %657 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %656, i32 0, i32 0 + %658 = load %Array*, %Array** %657, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %658, i32 -1) + %659 = bitcast { %Array*, i64 }* %656 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %659, i32 -1) + %660 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %654, i32 0, i32 1 + %661 = load { i64, %Callable* }*, { i64, %Callable* }** %660, align 8 + %662 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %661, i32 0, i32 1 + %663 = load %Callable*, %Callable** %662, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %663, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %663, i32 -1) + %664 = bitcast { i64, %Callable* }* %661 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %664, i32 -1) + %665 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %654 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %665, i32 -1) + br label %exiting__40 + +exiting__40: ; preds = %body__40 + %666 = add i64 %650, 1 + br label %header__40 + +exit__40: ; preds = %header__40 + call void @__quantum__rt__array_update_reference_count(%Array* %encodedSamples, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %160, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %161, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %166, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %227, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %227, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %172, i32 -1) + %667 = sub i64 %183, 1 + br label %header__41 + +header__41: ; preds = %exiting__41, %exit__40 + %668 = phi i64 [ 0, %exit__40 ], [ %679, %exiting__41 ] + %669 = icmp sle i64 %668, %667 + br i1 %669, label %body__41, label %exit__41 + +body__41: ; preds = %header__41 + %670 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %182, i64 %668) + %671 = bitcast i8* %670 to { { i64, %Array* }*, i2, i64 }** + %672 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %671, align 8 + %673 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %672, i32 0, i32 0 + %674 = load { i64, %Array* }*, { i64, %Array* }** %673, align 8 + %675 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %674, i32 0, i32 1 + %676 = load %Array*, %Array** %675, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %676, i32 -1) + %677 = bitcast { i64, %Array* }* %674 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %677, i32 -1) + %678 = bitcast { { i64, %Array* }*, i2, i64 }* %672 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %678, i32 -1) + br label %exiting__41 + +exiting__41: ; preds = %body__41 + %679 = add i64 %668, 1 + br label %header__41 + +exit__41: ; preds = %header__41 + call void @__quantum__rt__array_update_reference_count(%Array* 
%182, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %206, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %207, i32 -1) + %680 = bitcast { i64, { %Array*, %Array*, double }* }* %178 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %680, i32 -1) + %681 = sub i64 %598, 1 + br label %header__42 + +header__42: ; preds = %exiting__42, %exit__41 + %682 = phi i64 [ 0, %exit__41 ], [ %693, %exiting__42 ] + %683 = icmp sle i64 %682, %681 + br i1 %683, label %body__42, label %exit__42 + +body__42: ; preds = %header__42 + %684 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %597, i64 %682) + %685 = bitcast i8* %684 to { { i64, %Array* }*, i2, i64 }** + %686 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %685, align 8 + %687 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %686, i32 0, i32 0 + %688 = load { i64, %Array* }*, { i64, %Array* }** %687, align 8 + %689 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %688, i32 0, i32 1 + %690 = load %Array*, %Array** %689, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %690, i32 -1) + %691 = bitcast { i64, %Array* }* %688 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %691, i32 -1) + %692 = bitcast { { i64, %Array* }*, i2, i64 }* %686 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %692, i32 -1) + br label %exiting__42 + +exiting__42: ; preds = %body__42 + %693 = add i64 %682, 1 + br label %header__42 + +exit__42: ; preds = %header__42 + call void @__quantum__rt__array_update_reference_count(%Array* %597, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %613, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %614, i32 -1) + ret { %Array*, %Array*, double }* %224 + +header__43: ; preds = %exiting__43, %then0__5 + %694 = phi i64 [ 0, %then0__5 ], [ %705, %exiting__43 ] + %695 = icmp sle i64 %694, %253 + br i1 %695, label %body__43, label %exit__43 + +body__43: ; preds = %header__43 + %696 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %251, i64 %694) + %697 = bitcast i8* %696 to { { i64, %Array* }*, i2, i64 }** + %698 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %697, align 8 + %699 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %698, i32 0, i32 0 + %700 = load { i64, %Array* }*, { i64, %Array* }** %699, align 8 + %701 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %700, i32 0, i32 1 + %702 = load %Array*, %Array** %701, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %702, i32 1) + %703 = bitcast { i64, %Array* }* %700 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %703, i32 1) + %704 = bitcast { { i64, %Array* }*, i2, i64 }* %698 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %704, i32 1) + br label %exiting__43 + +exiting__43: ; preds = %body__43 + %705 = add i64 %694, 1 + br label %header__43 + +exit__43: ; preds = %header__43 + call void @__quantum__rt__array_update_alias_count(%Array* %251, i32 1) + %706 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %249, i32 0, i32 1 + %707 = load %Array*, %Array** %706, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %707, i32 1) + %708 = bitcast { %Array*, %Array*, double }* %249 to %Tuple* + call 
void @__quantum__rt__tuple_update_alias_count(%Tuple* %708, i32 1) + %709 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %176, i32 0, i32 0 + %710 = load %Array*, %Array** %709, align 8 + %711 = call i64 @__quantum__rt__array_get_size_1d(%Array* %710) + %712 = sub i64 %711, 1 + br label %header__44 + +header__44: ; preds = %exiting__44, %exit__43 + %713 = phi i64 [ 0, %exit__43 ], [ %724, %exiting__44 ] + %714 = icmp sle i64 %713, %712 + br i1 %714, label %body__44, label %exit__44 + +body__44: ; preds = %header__44 + %715 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %710, i64 %713) + %716 = bitcast i8* %715 to { { i64, %Array* }*, i2, i64 }** + %717 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %716, align 8 + %718 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %717, i32 0, i32 0 + %719 = load { i64, %Array* }*, { i64, %Array* }** %718, align 8 + %720 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %719, i32 0, i32 1 + %721 = load %Array*, %Array** %720, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %721, i32 -1) + %722 = bitcast { i64, %Array* }* %719 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %722, i32 -1) + %723 = bitcast { { i64, %Array* }*, i2, i64 }* %717 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %723, i32 -1) + br label %exiting__44 + +exiting__44: ; preds = %body__44 + %724 = add i64 %713, 1 + br label %header__44 + +exit__44: ; preds = %header__44 + call void @__quantum__rt__array_update_alias_count(%Array* %710, i32 -1) + %725 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %176, i32 0, i32 1 + %726 = load %Array*, %Array** %725, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %726, i32 -1) + %727 = bitcast { %Array*, %Array*, double }* %176 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %727, i32 -1) + %728 = sub i64 %711, 1 + br label %header__45 + +header__45: ; preds = %exiting__45, %exit__44 + %729 = phi i64 [ 0, %exit__44 ], [ %740, %exiting__45 ] + %730 = icmp sle i64 %729, %728 + br i1 %730, label %body__45, label %exit__45 + +body__45: ; preds = %header__45 + %731 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %710, i64 %729) + %732 = bitcast i8* %731 to { { i64, %Array* }*, i2, i64 }** + %733 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %732, align 8 + %734 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %733, i32 0, i32 0 + %735 = load { i64, %Array* }*, { i64, %Array* }** %734, align 8 + %736 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %735, i32 0, i32 1 + %737 = load %Array*, %Array** %736, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %737, i32 -1) + %738 = bitcast { i64, %Array* }* %735 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %738, i32 -1) + %739 = bitcast { { i64, %Array* }*, i2, i64 }* %733 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %739, i32 -1) + br label %exiting__45 + +exiting__45: ; preds = %body__45 + %740 = add i64 %729, 1 + br label %header__45 + +exit__45: ; preds = %header__45 + call void @__quantum__rt__array_update_reference_count(%Array* %710, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %726, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %727, i32 -1) + store { %Array*, %Array*, double }* %249, { %Array*, %Array*, double }** %current, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %246, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %246, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %247, i32 -1) + br label %continue__5 + +header__46: ; preds = %exiting__46, %else__1 + %741 = phi i64 [ 0, %else__1 ], [ %752, %exiting__46 ] + %742 = icmp sle i64 %741, %256 + br i1 %742, label %body__46, label %exit__46 + +body__46: ; preds = %header__46 + %743 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %182, i64 %741) + %744 = bitcast i8* %743 to { { i64, %Array* }*, i2, i64 }** + %745 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %744, align 8 + %746 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %745, i32 0, i32 0 + %747 = load { i64, %Array* }*, { i64, %Array* }** %746, align 8 + %748 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %747, i32 0, i32 1 + %749 = load %Array*, %Array** %748, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %749, i32 1) + %750 = bitcast { i64, %Array* }* %747 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %750, i32 1) + %751 = bitcast { { i64, %Array* }*, i2, i64 }* %745 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %751, i32 1) + br label %exiting__46 + +exiting__46: ; preds = %body__46 + %752 = add i64 %741, 1 + br label %header__46 + +exit__46: ; preds = %header__46 + call void @__quantum__rt__array_update_alias_count(%Array* %182, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %206, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %207, i32 1) + %753 = sub i64 %183, 1 + br label %header__47 + +header__47: ; preds = %exiting__47, %exit__46 + %754 = phi i64 [ 0, %exit__46 ], [ %765, %exiting__47 ] + %755 = icmp sle i64 %754, %753 + br i1 %755, label %body__47, label %exit__47 + +body__47: ; preds = %header__47 + %756 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %182, i64 %754) + %757 = bitcast i8* %756 to { { i64, %Array* }*, i2, i64 }** + %758 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %757, align 8 + %759 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %758, i32 0, i32 0 + %760 = load { i64, %Array* }*, { i64, %Array* }** %759, align 8 + %761 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %760, i32 0, i32 1 + %762 = load %Array*, %Array** %761, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %762, i32 1) + %763 = bitcast { i64, %Array* }* %760 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %763, i32 1) + %764 = bitcast { { i64, %Array* }*, i2, i64 }* %758 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %764, i32 1) + br label %exiting__47 + +exiting__47: ; preds = %body__47 + %765 = add i64 %754, 1 + br label %header__47 + +exit__47: ; preds = %header__47 + call void @__quantum__rt__array_update_reference_count(%Array* %182, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %206, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %207, i32 1) + %766 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %current, align 8 + 
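; note: %766 is the model previously stored in %current; the blocks below +
; (headers 48/49) walk its first array and decrement the alias counts and +
; then the reference counts on each inner { i64, %Array* } tuple, pairing +
; the earlier +1 updates before %proposedUpdate is stored into %current. +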
%767 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %766, i32 0, i32 0 + %768 = load %Array*, %Array** %767, align 8 + %769 = call i64 @__quantum__rt__array_get_size_1d(%Array* %768) + %770 = sub i64 %769, 1 + br label %header__48 + +header__48: ; preds = %exiting__48, %exit__47 + %771 = phi i64 [ 0, %exit__47 ], [ %782, %exiting__48 ] + %772 = icmp sle i64 %771, %770 + br i1 %772, label %body__48, label %exit__48 + +body__48: ; preds = %header__48 + %773 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %768, i64 %771) + %774 = bitcast i8* %773 to { { i64, %Array* }*, i2, i64 }** + %775 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %774, align 8 + %776 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %775, i32 0, i32 0 + %777 = load { i64, %Array* }*, { i64, %Array* }** %776, align 8 + %778 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %777, i32 0, i32 1 + %779 = load %Array*, %Array** %778, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %779, i32 -1) + %780 = bitcast { i64, %Array* }* %777 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %780, i32 -1) + %781 = bitcast { { i64, %Array* }*, i2, i64 }* %775 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %781, i32 -1) + br label %exiting__48 + +exiting__48: ; preds = %body__48 + %782 = add i64 %771, 1 + br label %header__48 + +exit__48: ; preds = %header__48 + call void @__quantum__rt__array_update_alias_count(%Array* %768, i32 -1) + %783 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %766, i32 0, i32 1 + %784 = load %Array*, %Array** %783, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %784, i32 -1) + %785 = bitcast { %Array*, %Array*, double }* %766 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %785, i32 -1) + %786 = sub i64 %769, 1 + br label %header__49 + +header__49: ; preds = %exiting__49, %exit__48 + %787 = phi i64 [ 0, %exit__48 ], [ %798, %exiting__49 ] + %788 = icmp sle i64 %787, %786 + br i1 %788, label %body__49, label %exit__49 + +body__49: ; preds = %header__49 + %789 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %768, i64 %787) + %790 = bitcast i8* %789 to { { i64, %Array* }*, i2, i64 }** + %791 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %790, align 8 + %792 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %791, i32 0, i32 0 + %793 = load { i64, %Array* }*, { i64, %Array* }** %792, align 8 + %794 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %793, i32 0, i32 1 + %795 = load %Array*, %Array** %794, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %795, i32 -1) + %796 = bitcast { i64, %Array* }* %793 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %796, i32 -1) + %797 = bitcast { { i64, %Array* }*, i2, i64 }* %791 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %797, i32 -1) + br label %exiting__49 + +exiting__49: ; preds = %body__49 + %798 = add i64 %787, 1 + br label %header__49 + +exit__49: ; preds = %header__49 + call void @__quantum__rt__array_update_reference_count(%Array* %768, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %784, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %785, i32 -1) + store { %Array*, 
%Array*, double }* %proposedUpdate, { %Array*, %Array*, double }** %current, align 8 + br label %continue__3 + +header__50: ; preds = %exiting__50, %continue__3 + %799 = phi i64 [ 0, %continue__3 ], [ %810, %exiting__50 ] + %800 = icmp sle i64 %799, %259 + br i1 %800, label %body__50, label %exit__50 + +body__50: ; preds = %header__50 + %801 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %182, i64 %799) + %802 = bitcast i8* %801 to { { i64, %Array* }*, i2, i64 }** + %803 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %802, align 8 + %804 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %803, i32 0, i32 0 + %805 = load { i64, %Array* }*, { i64, %Array* }** %804, align 8 + %806 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %805, i32 0, i32 1 + %807 = load %Array*, %Array** %806, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %807, i32 -1) + %808 = bitcast { i64, %Array* }* %805 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %808, i32 -1) + %809 = bitcast { { i64, %Array* }*, i2, i64 }* %803 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %809, i32 -1) + br label %exiting__50 + +exiting__50: ; preds = %body__50 + %810 = add i64 %799, 1 + br label %header__50 + +exit__50: ; preds = %header__50 + call void @__quantum__rt__array_update_alias_count(%Array* %182, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %206, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %207, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %160, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %161, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %166, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %258, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %258, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %172, i32 -1) + %811 = sub i64 %183, 1 + br label %header__51 + +header__51: ; preds = %exiting__51, %exit__50 + %812 = phi i64 [ 0, %exit__50 ], [ %823, %exiting__51 ] + %813 = icmp sle i64 %812, %811 + br i1 %813, label %body__51, label %exit__51 + +body__51: ; preds = %header__51 + %814 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %182, i64 %812) + %815 = bitcast i8* %814 to { { i64, %Array* }*, i2, i64 }** + %816 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %815, align 8 + %817 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %816, i32 0, i32 0 + %818 = load { i64, %Array* }*, { i64, %Array* }** %817, align 8 + %819 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %818, i32 0, i32 1 + %820 = load %Array*, %Array** %819, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %820, i32 -1) + %821 = bitcast { i64, %Array* }* %818 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %821, i32 -1) + %822 = bitcast { { i64, %Array* }*, i2, i64 }* %816 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %822, i32 -1) + br label %exiting__51 + +exiting__51: ; preds = %body__51 + %823 = add i64 %812, 1 + br label %header__51 + +exit__51: ; preds = %header__51 + call void @__quantum__rt__array_update_reference_count(%Array* %182, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %206, 
i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %207, i32 -1) + %824 = bitcast { i64, { %Array*, %Array*, double }* }* %178 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %824, i32 -1) + br label %exiting__9 + +header__52: ; preds = %exiting__52, %exit__9 + %825 = phi i64 [ 0, %exit__9 ], [ %836, %exiting__52 ] + %826 = icmp sle i64 %825, %192 + br i1 %826, label %body__52, label %exit__52 + +body__52: ; preds = %header__52 + %827 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %189, i64 %825) + %828 = bitcast i8* %827 to { { i64, %Array* }*, i2, i64 }** + %829 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %828, align 8 + %830 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %829, i32 0, i32 0 + %831 = load { i64, %Array* }*, { i64, %Array* }** %830, align 8 + %832 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %831, i32 0, i32 1 + %833 = load %Array*, %Array** %832, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %833, i32 1) + %834 = bitcast { i64, %Array* }* %831 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %834, i32 1) + %835 = bitcast { { i64, %Array* }*, i2, i64 }* %829 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %835, i32 1) + br label %exiting__52 + +exiting__52: ; preds = %body__52 + %836 = add i64 %825, 1 + br label %header__52 + +exit__52: ; preds = %header__52 + call void @__quantum__rt__array_update_reference_count(%Array* %189, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %190, i32 1) + %837 = load %Array*, %Array** %0, align 8 + %838 = call i64 @__quantum__rt__array_get_size_1d(%Array* %837) + %839 = sub i64 %838, 1 + br label %header__53 + +header__53: ; preds = %exiting__53, %exit__52 + %840 = phi i64 [ 0, %exit__52 ], [ %851, %exiting__53 ] + %841 = icmp sle i64 %840, %839 + br i1 %841, label %body__53, label %exit__53 + +body__53: ; preds = %header__53 + %842 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %837, i64 %840) + %843 = bitcast i8* %842 to { { i64, %Array* }*, i2, i64 }** + %844 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %843, align 8 + %845 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %844, i32 0, i32 0 + %846 = load { i64, %Array* }*, { i64, %Array* }** %845, align 8 + %847 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %846, i32 0, i32 1 + %848 = load %Array*, %Array** %847, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %848, i32 -1) + %849 = bitcast { i64, %Array* }* %846 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %849, i32 -1) + %850 = bitcast { { i64, %Array* }*, i2, i64 }* %844 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %850, i32 -1) + br label %exiting__53 + +exiting__53: ; preds = %body__53 + %851 = add i64 %840, 1 + br label %header__53 + +exit__53: ; preds = %header__53 + call void @__quantum__rt__array_update_alias_count(%Array* %837, i32 -1) + %852 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %852, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + %853 = sub i64 %nSamples, 1 + br label %header__54 + +header__54: ; preds = %exiting__54, %exit__53 + %854 = phi i64 [ 0, %exit__53 ], [ %862, %exiting__54 ] + %855 = icmp sle i64 
%854, %853 + br i1 %855, label %body__54, label %exit__54 + +body__54: ; preds = %header__54 + %856 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %854) + %857 = bitcast i8* %856 to { %Array*, i64 }** + %858 = load { %Array*, i64 }*, { %Array*, i64 }** %857, align 8 + %859 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %858, i32 0, i32 0 + %860 = load %Array*, %Array** %859, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %860, i32 -1) + %861 = bitcast { %Array*, i64 }* %858 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %861, i32 -1) + br label %exiting__54 + +exiting__54: ; preds = %body__54 + %862 = add i64 %854, 1 + br label %header__54 + +exit__54: ; preds = %header__54 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 -1) + %863 = load %Callable*, %Callable** %29, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %863, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %863, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %31, i32 -1) + %864 = load %Array*, %Array** %32, align 8 + %865 = call i64 @__quantum__rt__array_get_size_1d(%Array* %864) + %866 = sub i64 %865, 1 + br label %header__55 + +header__55: ; preds = %exiting__55, %exit__54 + %867 = phi i64 [ 0, %exit__54 ], [ %872, %exiting__55 ] + %868 = icmp sle i64 %867, %866 + br i1 %868, label %body__55, label %exit__55 + +body__55: ; preds = %header__55 + %869 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %864, i64 %867) + %870 = bitcast i8* %869 to %Range* + %871 = load %Range, %Range* %870, align 4 + br label %exiting__55 + +exiting__55: ; preds = %body__55 + %872 = add i64 %867, 1 + br label %header__55 + +exit__55: ; preds = %header__55 + call void @__quantum__rt__array_update_alias_count(%Array* %864, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 -1) + %873 = sub i64 %44, 1 + br label %header__56 + +header__56: ; preds = %exiting__56, %exit__55 + %874 = phi i64 [ 0, %exit__55 ], [ %879, %exiting__56 ] + %875 = icmp sle i64 %874, %873 + br i1 %875, label %body__56, label %exit__56 + +body__56: ; preds = %header__56 + %876 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %features, i64 %874) + %877 = bitcast i8* %876 to %Array** + %878 = load %Array*, %Array** %877, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %878, i32 -1) + br label %exiting__56 + +exiting__56: ; preds = %body__56 + %879 = add i64 %874, 1 + br label %header__56 + +exit__56: ; preds = %header__56 + call void @__quantum__rt__array_update_alias_count(%Array* %features, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %actualLabels, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %probabilities, i32 -1) + %880 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %186, i32 0, i32 0 + %881 = load %Array*, %Array** %880, align 8 + %882 = call i64 @__quantum__rt__array_get_size_1d(%Array* %881) + %883 = sub i64 %882, 1 + br label %header__57 + +header__57: ; preds = %exiting__57, %exit__56 + %884 = phi i64 [ 0, %exit__56 ], [ %895, %exiting__57 ] + %885 = icmp sle i64 %884, %883 + br i1 %885, label %body__57, label %exit__57 + +body__57: ; preds = %header__57 + %886 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %881, i64 %884) + %887 = bitcast i8* %886 to { { i64, %Array* }*, i2, i64 }** + %888 = load { { i64, %Array* }*, 
i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %887, align 8 + %889 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %888, i32 0, i32 0 + %890 = load { i64, %Array* }*, { i64, %Array* }** %889, align 8 + %891 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %890, i32 0, i32 1 + %892 = load %Array*, %Array** %891, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %892, i32 -1) + %893 = bitcast { i64, %Array* }* %890 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %893, i32 -1) + %894 = bitcast { { i64, %Array* }*, i2, i64 }* %888 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %894, i32 -1) + br label %exiting__57 + +exiting__57: ; preds = %body__57 + %895 = add i64 %884, 1 + br label %header__57 + +exit__57: ; preds = %header__57 + call void @__quantum__rt__array_update_alias_count(%Array* %881, i32 -1) + %896 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %186, i32 0, i32 1 + %897 = load %Array*, %Array** %896, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %897, i32 -1) + %898 = bitcast { %Array*, %Array*, double }* %186 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %898, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %inferredLabels, i32 -1) + %899 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %188, i32 0, i32 0 + %900 = load %Array*, %Array** %899, align 8 + %901 = call i64 @__quantum__rt__array_get_size_1d(%Array* %900) + %902 = sub i64 %901, 1 + br label %header__58 + +header__58: ; preds = %exiting__58, %exit__57 + %903 = phi i64 [ 0, %exit__57 ], [ %914, %exiting__58 ] + %904 = icmp sle i64 %903, %902 + br i1 %904, label %body__58, label %exit__58 + +body__58: ; preds = %header__58 + %905 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %900, i64 %903) + %906 = bitcast i8* %905 to { { i64, %Array* }*, i2, i64 }** + %907 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %906, align 8 + %908 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %907, i32 0, i32 0 + %909 = load { i64, %Array* }*, { i64, %Array* }** %908, align 8 + %910 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %909, i32 0, i32 1 + %911 = load %Array*, %Array** %910, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %911, i32 -1) + %912 = bitcast { i64, %Array* }* %909 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %912, i32 -1) + %913 = bitcast { { i64, %Array* }*, i2, i64 }* %907 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %913, i32 -1) + br label %exiting__58 + +exiting__58: ; preds = %body__58 + %914 = add i64 %903, 1 + br label %header__58 + +exit__58: ; preds = %header__58 + call void @__quantum__rt__array_update_alias_count(%Array* %900, i32 -1) + %915 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %188, i32 0, i32 1 + %916 = load %Array*, %Array** %915, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %916, i32 -1) + %917 = bitcast { %Array*, %Array*, double }* %188 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %917, i32 -1) + %918 = sub i64 %129, 1 + br label %header__59 + +header__59: ; preds = %exiting__59, %exit__58 + %919 = phi i64 [ 0, %exit__58 ], [ %935, %exiting__59 ] + %920 = icmp sle i64 %919, %918 + 
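; the loop below releases the alias counts taken on each element of +
; %encodedSamples: the inner feature %Array*, its wrapping tuples, and the +
; captured %Callable* of the (sample, label) pair. +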
br i1 %920, label %body__59, label %exit__59 + +body__59: ; preds = %header__59 + %921 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %encodedSamples, i64 %919) + %922 = bitcast i8* %921 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %923 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %922, align 8 + %924 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %923, i32 0, i32 0 + %925 = load { %Array*, i64 }*, { %Array*, i64 }** %924, align 8 + %926 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %925, i32 0, i32 0 + %927 = load %Array*, %Array** %926, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %927, i32 -1) + %928 = bitcast { %Array*, i64 }* %925 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %928, i32 -1) + %929 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %923, i32 0, i32 1 + %930 = load { i64, %Callable* }*, { i64, %Callable* }** %929, align 8 + %931 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %930, i32 0, i32 1 + %932 = load %Callable*, %Callable** %931, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %932, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %932, i32 -1) + %933 = bitcast { i64, %Callable* }* %930 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %933, i32 -1) + %934 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %923 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %934, i32 -1) + br label %exiting__59 + +exiting__59: ; preds = %body__59 + %935 = add i64 %919, 1 + br label %header__59 + +exit__59: ; preds = %header__59 + call void @__quantum__rt__array_update_alias_count(%Array* %encodedSamples, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %43, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %43, i32 -1) + %936 = sub i64 %44, 1 + br label %header__60 + +header__60: ; preds = %exiting__60, %exit__59 + %937 = phi i64 [ 0, %exit__59 ], [ %942, %exiting__60 ] + %938 = icmp sle i64 %937, %936 + br i1 %938, label %body__60, label %exit__60 + +body__60: ; preds = %header__60 + %939 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %features, i64 %937) + %940 = bitcast i8* %939 to %Array** + %941 = load %Array*, %Array** %940, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %941, i32 -1) + br label %exiting__60 + +exiting__60: ; preds = %body__60 + %942 = add i64 %937, 1 + br label %header__60 + +exit__60: ; preds = %header__60 + call void @__quantum__rt__array_update_reference_count(%Array* %features, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %52, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %52, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %actualLabels, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %probabilities, i32 -1) + %943 = call i64 @__quantum__rt__array_get_size_1d(%Array* %60) + %944 = sub i64 %943, 1 + br label %header__61 + +header__61: ; preds = %exiting__61, %exit__60 + %945 = phi i64 [ 0, %exit__60 ], [ %951, %exiting__61 ] + %946 = icmp sle i64 %945, %944 + br i1 %946, label %body__61, label %exit__61 + +body__61: ; preds = %header__61 + %947 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 %945) + %948 = bitcast i8* %947 to { double, i64 }** + %949 = load { double, i64 }*, { double, i64 }** %948, align 8 + %950 = bitcast { double, i64 }* %949 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %950, i32 -1) + br label %exiting__61 + +exiting__61: ; preds = %body__61 + %951 = add i64 %945, 1 + br label %header__61 + +exit__61: ; preds = %header__61 + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %inferredLabels, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %82, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %187, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %111, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %128, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %128, i32 -1) + %952 = sub i64 %129, 1 + br label %header__62 + +header__62: ; preds = %exiting__62, %exit__61 + %953 = phi i64 [ 0, %exit__61 ], [ %969, %exiting__62 ] + %954 = icmp sle i64 %953, %952 + br i1 %954, label %body__62, label %exit__62 + +body__62: ; preds = %header__62 + %955 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %encodedSamples, i64 %953) + %956 = bitcast i8* %955 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %957 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %956, align 8 + %958 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %957, i32 0, i32 0 + %959 = load { %Array*, i64 }*, { %Array*, i64 }** %958, align 8 + %960 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %959, i32 0, i32 0 + %961 = load %Array*, %Array** %960, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %961, i32 -1) + %962 = bitcast { %Array*, i64 }* %959 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %962, i32 -1) + %963 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %957, i32 0, i32 1 + %964 = load { i64, %Callable* }*, { i64, %Callable* }** %963, align 8 + %965 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %964, i32 0, i32 1 + %966 = load %Callable*, %Callable** %965, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %966, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %966, i32 -1) + %967 = bitcast { i64, %Callable* }* %964 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %967, i32 -1) + %968 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %957 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %968, i32 -1) + br label %exiting__62 + +exiting__62: ; preds = %body__62 + %969 = add i64 %953, 1 + br label %header__62 + +exit__62: ; preds = %header__62 + call void @__quantum__rt__array_update_reference_count(%Array* %encodedSamples, i32 -1) + %970 = sub i64 %901, 1 + br label %header__63 + +header__63: ; preds = %exiting__63, %exit__62 + %971 = phi i64 [ 0, %exit__62 ], [ %982, %exiting__63 ] + %972 = icmp sle i64 %971, %970 + br i1 %972, label %body__63, label %exit__63 + +body__63: ; preds = %header__63 + %973 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %900, i64 %971) + %974 = bitcast i8* 
%973 to { { i64, %Array* }*, i2, i64 }** + %975 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %974, align 8 + %976 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %975, i32 0, i32 0 + %977 = load { i64, %Array* }*, { i64, %Array* }** %976, align 8 + %978 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %977, i32 0, i32 1 + %979 = load %Array*, %Array** %978, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %979, i32 -1) + %980 = bitcast { i64, %Array* }* %977 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %980, i32 -1) + %981 = bitcast { { i64, %Array* }*, i2, i64 }* %975 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %981, i32 -1) + br label %exiting__63 + +exiting__63: ; preds = %body__63 + %982 = add i64 %971, 1 + br label %header__63 + +exit__63: ; preds = %header__63 + call void @__quantum__rt__array_update_reference_count(%Array* %900, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %916, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %917, i32 -1) + ret { %Array*, %Array*, double }* %186 +} + +define internal %Array* @Microsoft__Quantum__Arrays___458019d5b77947a88477997a20fc14c5_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to { %Array*, i64 }** + %5 = load { %Array*, i64 }*, { %Array*, i64 }** %4, align 8 + %6 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %7, i32 1) + %8 = bitcast { %Array*, i64 }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %10 = icmp eq i64 %length, 0 + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %12 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %14 = bitcast i8* %13 to { %Array*, i64 }** + %15 = load { %Array*, i64 }*, { %Array*, i64 }** %14, align 8 + %16 = bitcast { %Array*, i64 }* %15 to %Tuple* + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %16, %Tuple* %17) + %18 = bitcast %Tuple* %17 to { %Array* }* + %19 
= getelementptr inbounds { %Array* }, { %Array* }* %18, i32 0, i32 0 + %first = load %Array*, %Array** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %first, i32 1) + %20 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %21 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %22 = phi i64 [ 0, %then0__1 ], [ %30, %exiting__2 ] + %23 = icmp sle i64 %22, %12 + br i1 %23, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %22) + %25 = bitcast i8* %24 to { %Array*, i64 }** + %26 = load { %Array*, i64 }*, { %Array*, i64 }** %25, align 8 + %27 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %26, i32 0, i32 0 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 -1) + %29 = bitcast { %Array*, i64 }* %26 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %22, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %11 + +header__3: ; preds = %exiting__3, %continue__1 + %31 = phi i64 [ 0, %continue__1 ], [ %35, %exiting__3 ] + %32 = icmp sle i64 %31, %21 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %20, i64 %31) + %34 = bitcast i8* %33 to %Array** + store %Array* %first, %Array** %34, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %first, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %20, %Array** %retval, align 8 + %36 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %37 = phi i64 [ 0, %exit__3 ], [ %42, %exiting__4 ] + %38 = icmp sle i64 %37, %36 + br i1 %38, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %20, i64 %37) + %40 = bitcast i8* %39 to %Array** + %41 = load %Array*, %Array** %40, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %42 = add i64 %37, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 1) + %43 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idx = phi i64 [ 1, %exit__4 ], [ %58, %exiting__5 ] + %44 = icmp sle i64 %idx, %43 + br i1 %44, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %45 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %45, i32 -1) + %46 = call %Array* @__quantum__rt__array_copy(%Array* %45, i1 false) + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %48 = bitcast i8* %47 to { %Array*, i64 }** + %49 = load { %Array*, i64 }*, { %Array*, i64 }** %48, align 8 + %50 = bitcast { %Array*, i64 }* %49 to %Tuple* + %51 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %50, %Tuple* %51) + %52 = bitcast 
%Tuple* %51 to { %Array* }* + %53 = getelementptr inbounds { %Array* }, { %Array* }* %52, i32 0, i32 0 + %54 = load %Array*, %Array** %53, align 8 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %46, i64 %idx) + %56 = bitcast i8* %55 to %Array** + call void @__quantum__rt__array_update_alias_count(%Array* %54, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %54, i32 1) + %57 = load %Array*, %Array** %56, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %57, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %57, i32 -1) + store %Array* %54, %Array** %56, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %46, i32 1) + store %Array* %46, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %45, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %54, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %51, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %58 = add i64 %idx, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %59 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %60 = sub i64 %length, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %61 = phi i64 [ 0, %exit__5 ], [ %69, %exiting__6 ] + %62 = icmp sle i64 %61, %60 + br i1 %62, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %61) + %64 = bitcast i8* %63 to { %Array*, i64 }** + %65 = load { %Array*, i64 }*, { %Array*, i64 }** %64, align 8 + %66 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %65, i32 0, i32 0 + %67 = load %Array*, %Array** %66, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %67, i32 -1) + %68 = bitcast { %Array*, i64 }* %65 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %68, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %69 = add i64 %61, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %first, i32 -1) + %70 = call i64 @__quantum__rt__array_get_size_1d(%Array* %59) + %71 = sub i64 %70, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %72 = phi i64 [ 0, %exit__6 ], [ %77, %exiting__7 ] + %73 = icmp sle i64 %72, %71 + br i1 %73, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %59, i64 %72) + %75 = bitcast i8* %74 to %Array** + %76 = load %Array*, %Array** %75, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %77 = add i64 %72, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %59, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %first, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + ret %Array* %59 +} + +define internal void @Microsoft__Quantum__MachineLearning___Features__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* 
%arg-tuple to { %Array*, i64 }* + %1 = call %Array* @Microsoft__Quantum__MachineLearning___Features__body({ %Array*, i64 }* %0) + %2 = bitcast %Tuple* %result-tuple to { %Array* }* + %3 = getelementptr inbounds { %Array* }, { %Array* }* %2, i32 0, i32 0 + store %Array* %1, %Array** %3, align 8 + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___f34491685bf044f1939458f941be92ef_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to { %Array*, i64 }** + %5 = load { %Array*, i64 }*, { %Array*, i64 }** %4, align 8 + %6 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %7, i32 1) + %8 = bitcast { %Array*, i64 }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %10 = icmp eq i64 %length, 0 + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %12 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %14 = bitcast i8* %13 to { %Array*, i64 }** + %15 = load { %Array*, i64 }*, { %Array*, i64 }** %14, align 8 + %16 = bitcast { %Array*, i64 }* %15 to %Tuple* + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %16, %Tuple* %17) + %18 = bitcast %Tuple* %17 to { i64 }* + %19 = getelementptr inbounds { i64 }, { i64 }* %18, i32 0, i32 0 + %first = load i64, i64* %19, align 4 + %20 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %21 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %22 = phi i64 [ 0, %then0__1 ], [ %30, %exiting__2 ] + %23 = icmp sle i64 %22, %12 + br i1 %23, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %22) + %25 = bitcast i8* %24 to { %Array*, i64 }** + %26 = load { %Array*, i64 }*, { %Array*, i64 }** %25, align 8 + %27 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %26, i32 0, i32 0 + %28 = load %Array*, %Array** %27, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 -1) + %29 = bitcast { %Array*, i64 }* %26 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %30 = add i64 %22, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %11 + +header__3: ; preds = %exiting__3, %continue__1 + %31 = phi i64 [ 0, %continue__1 ], [ %35, %exiting__3 ] + %32 = icmp sle i64 %31, %21 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %20, i64 %31) + %34 = bitcast i8* %33 to i64* + store i64 %first, i64* %34, align 4 + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %20, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 1) + %36 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %idx = phi i64 [ 1, %exit__3 ], [ %50, %exiting__4 ] + %37 = icmp sle i64 %idx, %36 + br i1 %37, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %38 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %38, i32 -1) + %39 = call %Array* @__quantum__rt__array_copy(%Array* %38, i1 false) + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %41 = bitcast i8* %40 to { %Array*, i64 }** + %42 = load { %Array*, i64 }*, { %Array*, i64 }** %41, align 8 + %43 = bitcast { %Array*, i64 }* %42 to %Tuple* + %44 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %43, %Tuple* %44) + %45 = bitcast %Tuple* %44 to { i64 }* + %46 = getelementptr inbounds { i64 }, { i64 }* %45, i32 0, i32 0 + %47 = load i64, i64* %46, align 4 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %39, i64 %idx) + %49 = bitcast i8* %48 to i64* + store i64 %47, i64* %49, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %39, i32 1) + store %Array* %39, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %38, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %44, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %50 = add i64 %idx, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + %51 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %52 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %53 = phi i64 [ 0, %exit__4 ], [ %61, %exiting__5 ] + %54 = icmp sle i64 %53, %52 + br i1 %54, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %53) + %56 = bitcast i8* %55 to { %Array*, i64 }** + %57 = load { %Array*, i64 }*, { %Array*, i64 }** %56, align 8 + %58 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %57, i32 0, i32 0 + %59 = load %Array*, %Array** %58, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %59, i32 -1) + %60 = bitcast { %Array*, i64 }* %57 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %60, i32 -1) + br label %exiting__5 + +exiting__5: 
; preds = %body__5 + %61 = add i64 %53, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %51, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + ret %Array* %51 +} + +define internal void @Microsoft__Quantum__MachineLearning___Label__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, i64 }* + %1 = call i64 @Microsoft__Quantum__MachineLearning___Label__body({ %Array*, i64 }* %0) + %2 = bitcast %Tuple* %result-tuple to { i64 }* + %3 = getelementptr inbounds { i64 }, { i64 }* %2, i32 0, i32 0 + store i64 %1, i64* %3, align 4 + ret void +} + +define internal %Array* @Microsoft__Quantum__MachineLearning__EstimateClassificationProbabilities__body(double %tolerance, { %Array*, %Array*, double }* %model, %Array* %samples, i64 %nMeasurements) { +entry: + %0 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { { i64, %Array* }*, i2, i64 }** + %8 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %7, align 8 + %9 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %8, i32 0, i32 0 + %10 = load { i64, %Array* }*, { i64, %Array* }** %9, align 8 + %11 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %10, i32 0, i32 1 + %12 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = bitcast { i64, %Array* }* %10 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %13, i32 1) + %14 = bitcast { { i64, %Array* }*, i2, i64 }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array*, %Array*, double }* %model to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + %19 = call i64 @__quantum__rt__array_get_size_1d(%Array* %samples) + %20 = sub i64 %19, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, %exit__1 ], [ %26, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %21) + %24 = bitcast i8* %23 to %Array** + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %21, 1 + br 
label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 1) + %27 = call i1 @Microsoft__Quantum__Arrays___b8502d12d9d54d60a228b6d21de14ed7_IsEmpty__body(%Array* %1) + br i1 %27, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__2 + br label %condContinue__1 + +condFalse__1: ; preds = %exit__2 + %28 = sitofp i64 %2 to double + %29 = fdiv double %tolerance, %28 + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %effectiveTolerance = phi double [ %tolerance, %condTrue__1 ], [ %29, %condFalse__1 ] + %30 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning__EstimateClassificationProbability__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %31 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %condContinue__1 + %32 = phi i64 [ 0, %condContinue__1 ], [ %43, %exiting__3 ] + %33 = icmp sle i64 %32, %31 + br i1 %33, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %32) + %35 = bitcast i8* %34 to { { i64, %Array* }*, i2, i64 }** + %36 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %35, align 8 + %37 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %36, i32 0, i32 0 + %38 = load { i64, %Array* }*, { i64, %Array* }** %37, align 8 + %39 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %38, i32 0, i32 1 + %40 = load %Array*, %Array** %39, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %40, i32 1) + %41 = bitcast { i64, %Array* }* %38 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 1) + %42 = bitcast { { i64, %Array* }*, i2, i64 }* %36 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %43 = add i64 %32, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 1) + %44 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, { %Array*, %Array*, double }*, i64 }* getelementptr ({ %Callable*, double, { %Array*, %Array*, double }*, i64 }, { %Callable*, double, { %Array*, %Array*, double }*, i64 }* null, i32 1) to i64)) + %45 = bitcast %Tuple* %44 to { %Callable*, double, { %Array*, %Array*, double }*, i64 }* + %46 = getelementptr inbounds { %Callable*, double, { %Array*, %Array*, double }*, i64 }, { %Callable*, double, { %Array*, %Array*, double }*, i64 }* %45, i32 0, i32 0 + %47 = getelementptr inbounds { %Callable*, double, { %Array*, %Array*, double }*, i64 }, { %Callable*, double, { %Array*, %Array*, double }*, i64 }* %45, i32 0, i32 1 + %48 = getelementptr inbounds { %Callable*, double, { %Array*, %Array*, double }*, i64 }, { %Callable*, double, { %Array*, %Array*, double }*, i64 }* %45, i32 0, i32 2 + %49 = getelementptr inbounds { %Callable*, double, { %Array*, %Array*, double }*, i64 }, { %Callable*, double, { %Array*, %Array*, double }*, i64 }* %45, i32 0, i32 3 + store %Callable* %30, %Callable** %46, align 8 + store double %effectiveTolerance, double* %47, align 8 + store { %Array*, %Array*, 
double }* %model, { %Array*, %Array*, double }** %48, align 8 + store i64 %nMeasurements, i64* %49, align 4 + %50 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__12__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__11__FunctionTable, %Tuple* %44) + %51 = call %Array* @Microsoft__Quantum__Arrays___db69bea2cd3249c5b832b64e75b5d986_ForEach__body(%Callable* %50, %Array* %samples) + %52 = sub i64 %2, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %53 = phi i64 [ 0, %exit__3 ], [ %64, %exiting__4 ] + %54 = icmp sle i64 %53, %52 + br i1 %54, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %53) + %56 = bitcast i8* %55 to { { i64, %Array* }*, i2, i64 }** + %57 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %56, align 8 + %58 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %57, i32 0, i32 0 + %59 = load { i64, %Array* }*, { i64, %Array* }** %58, align 8 + %60 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %59, i32 0, i32 1 + %61 = load %Array*, %Array** %60, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %61, i32 -1) + %62 = bitcast { i64, %Array* }* %59 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %62, i32 -1) + %63 = bitcast { { i64, %Array* }*, i2, i64 }* %57 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %63, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %64 = add i64 %53, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + %65 = sub i64 %19, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %66 = phi i64 [ 0, %exit__4 ], [ %71, %exiting__5 ] + %67 = icmp sle i64 %66, %65 + br i1 %67, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %66) + %69 = bitcast i8* %68 to %Array** + %70 = load %Array*, %Array** %69, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %70, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %71 = add i64 %66, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %50, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %50, i32 -1) + ret %Array* %51 +} + +define internal double @Microsoft__Quantum__MachineLearning____QsRef0__UpdatedBias____body(%Array* %labeledProbabilities, double %bias, double %tolerance) { +entry: + %max0 = alloca double, align 8 + %min1 = alloca double, align 8 + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %labeledProbabilities) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %labeledProbabilities, i64 %2) + %5 = bitcast i8* %4 to { double, i64 }** + %6 = load { double, i64 }*, { 
double, i64 }** %5, align 8 + %7 = bitcast { double, i64 }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %labeledProbabilities, i32 1) + store double 1.000000e+00, double* %min1, align 8 + store double 0.000000e+00, double* %max0, align 8 + %9 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %labeledProbabilities, i64 %10) + %13 = bitcast i8* %12 to { double, i64 }** + %14 = load { double, i64 }*, { double, i64 }** %13, align 8 + %15 = getelementptr inbounds { double, i64 }, { double, i64 }* %14, i32 0, i32 0 + %probability = load double, double* %15, align 8 + %16 = getelementptr inbounds { double, i64 }, { double, i64 }* %14, i32 0, i32 1 + %label = load i64, i64* %16, align 4 + %17 = icmp eq i64 %label, 1 + br i1 %17, label %then0__1, label %else__1 + +then0__1: ; preds = %body__2 + %18 = load double, double* %min1, align 8 + %19 = fcmp ogt double %18, %probability + br i1 %19, label %then0__2, label %continue__2 + +then0__2: ; preds = %then0__1 + store double %probability, double* %min1, align 8 + br label %continue__2 + +continue__2: ; preds = %then0__2, %then0__1 + br label %continue__1 + +else__1: ; preds = %body__2 + %20 = load double, double* %max0, align 8 + %21 = fcmp olt double %20, %probability + br i1 %21, label %then0__3, label %continue__3 + +then0__3: ; preds = %else__1 + store double %probability, double* %max0, align 8 + br label %continue__3 + +continue__3: ; preds = %then0__3, %else__1 + br label %continue__1 + +continue__1: ; preds = %continue__3, %continue__2 + br label %exiting__2 + +exiting__2: ; preds = %continue__1 + %22 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %23 = load double, double* %max0, align 8 + %24 = load double, double* %min1, align 8 + %25 = fcmp ole double %23, %24 + br i1 %25, label %then0__4, label %continue__4 + +then0__4: ; preds = %exit__2 + %26 = fsub double 1.000000e+00, %23 + %27 = fsub double %26, %24 + %28 = fmul double 5.000000e-01, %27 + %29 = sub i64 %0, 1 + br label %header__3 + +continue__4: ; preds = %exit__2 + %30 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning____QsRef0__MisclassificationRate____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %31 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___72deeddd84a741deba305c641ccbb494_Fst__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %32 = call %Array* @Microsoft__Quantum__Arrays___2a89d4e05ab447e5a736535efa7cd8e7_Mapped__body(%Callable* %31, %Array* %labeledProbabilities) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + %33 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___ce11ecc402da481dad234c6ec2301ce8_Snd__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %34 = call %Array* 
@Microsoft__Quantum__Arrays___1cabe7eb60764be98bab0923a4277ae6_Mapped__body(%Callable* %33, %Array* %labeledProbabilities) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + %35 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Array* }* getelementptr ({ %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %36 = bitcast %Tuple* %35 to { %Callable*, %Array*, %Array* }* + %37 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %36, i32 0, i32 0 + %38 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %36, i32 0, i32 1 + %39 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %36, i32 0, i32 2 + store %Callable* %30, %Callable** %37, align 8 + store %Array* %32, %Array** %38, align 8 + store %Array* %34, %Array** %39, align 8 + %40 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__9__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__8__FunctionTable, %Tuple* %35) + %41 = load double, double* %max0, align 8 + %42 = fsub double 5.000000e-01, %41 + %43 = load double, double* %min1, align 8 + %44 = fsub double 5.000000e-01, %43 + %45 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %46 = bitcast %Tuple* %45 to { double, double }* + %47 = getelementptr inbounds { double, double }, { double, double }* %46, i32 0, i32 0 + %48 = getelementptr inbounds { double, double }, { double, double }* %46, i32 0, i32 1 + store double %42, double* %47, align 8 + store double %44, double* %48, align 8 + %optimum = call { double, double, i64 }* @Microsoft__Quantum__Optimization__LocalUnivariateMinimum__body(%Callable* %40, { double, double }* %46, double %tolerance) + %49 = bitcast { double, double, i64 }* %optimum to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %49, i32 1) + %50 = getelementptr inbounds { double, double, i64 }, { double, double, i64 }* %optimum, i32 0, i32 0 + %51 = load double, double* %50, align 8 + %52 = sub i64 %0, 1 + br label %header__4 + +header__3: ; preds = %exiting__3, %then0__4 + %53 = phi i64 [ 0, %then0__4 ], [ %59, %exiting__3 ] + %54 = icmp sle i64 %53, %29 + br i1 %54, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %labeledProbabilities, i64 %53) + %56 = bitcast i8* %55 to { double, i64 }** + %57 = load { double, i64 }*, { double, i64 }** %56, align 8 + %58 = bitcast { double, i64 }* %57 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %58, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %59 = add i64 %53, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %labeledProbabilities, i32 -1) + ret double %28 + +header__4: ; preds = %exiting__4, %continue__4 + %60 = phi i64 [ 0, %continue__4 ], [ %66, %exiting__4 ] + %61 = icmp sle i64 %60, %52 + br i1 %61, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %labeledProbabilities, i64 %60) + %63 = bitcast i8* %62 to { double, i64 }** + %64 = load { double, i64 }*, 
{ double, i64 }** %63, align 8 + %65 = bitcast { double, i64 }* %64 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %65, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %66 = add i64 %60, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %labeledProbabilities, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %49, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %40, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %40, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %49, i32 -1) + ret double %51 +} + +define internal %Array* @Microsoft__Quantum__Arrays___e71c6b9cbb804917a4d7cd04011f2188_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %2 = icmp slt i64 %0, %1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = icmp eq i64 %nElements, 0 + br i1 %3, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %4 + +continue__1: ; preds = %condContinue__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %6 = bitcast i8* %5 to double* + %7 = load double, double* %6, align 8 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %9 = bitcast i8* %8 to i64* + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64 }* getelementptr ({ double, i64 }, { double, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { double, i64 }* + %13 = getelementptr inbounds { double, i64 }, { double, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { double, i64 }, { double, i64 }* %12, i32 0, i32 1 + store double %7, double* %13, align 8 + store i64 %10, i64* %14, align 4 + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %16 = sub i64 %nElements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %17 = phi i64 [ 0, %continue__1 ], [ %21, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %17) + %20 = bitcast i8* %19 to { double, i64 }** + store { double, i64 }* %12, { double, i64 }** %20, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %15, %Array** %output, align 8 + %22 = sub i64 %nElements, 1 + br label 
%header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %23) + %26 = bitcast i8* %25 to { double, i64 }** + %27 = load { double, i64 }*, { double, i64 }** %26, align 8 + %28 = bitcast { double, i64 }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %30 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %48, %exiting__3 ] + %31 = icmp sle i64 %idxElement, %30 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %32 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %35 = bitcast i8* %34 to double* + %36 = load double, double* %35, align 8 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %38 = bitcast i8* %37 to i64* + %39 = load i64, i64* %38, align 4 + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64 }* getelementptr ({ double, i64 }, { double, i64 }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { double, i64 }* + %42 = getelementptr inbounds { double, i64 }, { double, i64 }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { double, i64 }, { double, i64 }* %41, i32 0, i32 1 + store double %36, double* %42, align 8 + store i64 %39, i64* %43, align 4 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxElement) + %45 = bitcast i8* %44 to { double, i64 }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %46 = load { double, i64 }*, { double, i64 }** %45, align 8 + %47 = bitcast { double, i64 }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { double, i64 }* %41, { double, i64 }** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { double, i64 }** + %56 = load { double, i64 }*, { double, i64 }** %55, align 8 + %57 = 
bitcast { double, i64 }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret %Array* %49 +} + +define internal %Array* @Microsoft__Quantum__MachineLearning__InferredLabels__body(double %bias, %Array* %probabilities) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %probabilities, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning__InferredLabel__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, double }* + %3 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %2, i32 0, i32 1 + store %Callable* %0, %Callable** %3, align 8 + store double %bias, double* %4, align 8 + %5 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__14__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__13__FunctionTable, %Tuple* %1) + %6 = call %Array* @Microsoft__Quantum__Arrays___d3aba77e00014a79bc8f48ec51f8fb2a_Mapped__body(%Callable* %5, %Array* %probabilities) + call void @__quantum__rt__array_update_alias_count(%Array* %probabilities, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %5, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %5, i32 -1) + ret %Array* %6 +} + +define internal %Array* @Microsoft__Quantum__MachineLearning__Misclassifications__body(%Array* %inferredLabels, %Array* %actualLabels) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %inferredLabels, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %actualLabels, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Logical__NotEqualI__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___4676d5f0d26141cfa6e357563bd65669_Zipped__body(%Array* %inferredLabels, %Array* %actualLabels) + %2 = call %Array* @Microsoft__Quantum__Arrays___0a5870f6f00b4e91a76081167d0bc275_Where__body(%Callable* %0, %Array* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %inferredLabels, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %actualLabels, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %5) + %8 = bitcast i8* %7 to { i64, i64 }** + %9 = load { i64, i64 }*, { i64, i64 }** 
%8, align 8 + %10 = bitcast { i64, i64 }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + ret %Array* %2 +} + +define internal i64 @Microsoft__Quantum__Math__MaxI__body(i64 %a, i64 %b) { +entry: + %0 = icmp sgt i64 %a, %b + %1 = select i1 %0, i64 %a, i64 %b + ret i64 %1 +} + +define internal i64 @Microsoft__Quantum__MachineLearning__FeatureRegisterSize__body(%Array* %sample) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %sample, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %sample) + %1 = sitofp i64 %0 to double + %2 = call double @Microsoft__Quantum__Math__Lg__body(double %1) + %3 = call i64 @Microsoft__Quantum__Math__Ceiling__body(double %2) + call void @__quantum__rt__array_update_alias_count(%Array* %sample, i32 -1) + ret i64 %3 +} + +define internal i64 @Microsoft__Quantum__MachineLearning__NQubitsRequired__body({ %Array*, %Array*, double }* %model) { +entry: + %lastQubitIndex = alloca i64, align 8 + %0 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { { i64, %Array* }*, i2, i64 }** + %8 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %7, align 8 + %9 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %8, i32 0, i32 0 + %10 = load { i64, %Array* }*, { i64, %Array* }** %9, align 8 + %11 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %10, i32 0, i32 1 + %12 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = bitcast { i64, %Array* }* %10 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %13, i32 1) + %14 = bitcast { { i64, %Array* }*, i2, i64 }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array*, %Array*, double }* %model to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + store i64 -1, i64* %lastQubitIndex, align 4 + %19 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %39, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %20) + %23 = bitcast i8* %22 to { { i64, %Array* }*, i2, i64 }** + %gate = 
load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %23, align 8 + %24 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %gate, i32 0, i32 0 + %25 = load { i64, %Array* }*, { i64, %Array* }** %24, align 8 + %26 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %25, i32 0, i32 1 + %27 = load %Array*, %Array** %26, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 1) + %28 = bitcast { i64, %Array* }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + %29 = bitcast { { i64, %Array* }*, i2, i64 }* %gate to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + %30 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__MaxI__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %31 = load i64, i64* %lastQubitIndex, align 4 + %32 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %25, i32 0, i32 0 + %33 = load i64, i64* %32, align 4 + %34 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 0) + %36 = bitcast i8* %35 to i64* + store i64 %33, i64* %36, align 4 + %37 = call %Array* @__quantum__rt__array_concatenate(%Array* %27, %Array* %34) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 1) + %38 = call i64 @Microsoft__Quantum__Arrays___2d4fd0446d3a462ca520e9612ada343e_Fold__body(%Callable* %30, i64 %31, %Array* %37) + store i64 %38, i64* %lastQubitIndex, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %30, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %39 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %40 = load i64, i64* %lastQubitIndex, align 4 + %41 = add i64 %40, 1 + %42 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %43 = phi i64 [ 0, %exit__2 ], [ %54, %exiting__3 ] + %44 = icmp sle i64 %43, %42 + br i1 %44, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %43) + %46 = bitcast i8* %45 to { { i64, %Array* }*, i2, i64 }** + %47 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %46, align 8 + %48 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %47, i32 0, i32 0 + %49 = load { i64, %Array* }*, { i64, %Array* }** %48, align 8 + %50 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %49, i32 0, i32 1 + %51 = load %Array*, %Array** %50, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %51, i32 -1) + %52 = bitcast { i64, %Array* }* %49 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %52, i32 -1) + %53 = bitcast { { i64, %Array* }*, i2, i64 }* %47 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %53, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %54 = add i64 %43, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + ret i64 %41 +} + +define internal %Array* @Microsoft__Quantum__Arrays___babf2e1b7d9541c0a4b642aa9d5d6bbf_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to { %Array*, i64 }** + %5 = load { %Array*, i64 }*, { %Array*, i64 }** %4, align 8 + %6 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %7, i32 1) + %8 = bitcast { %Array*, i64 }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %10 = icmp eq i64 %length, 0 + br i1 %10, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %12 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %14 = bitcast i8* %13 to { %Array*, i64 }** + %15 = load { %Array*, i64 }*, { %Array*, i64 }** %14, align 8 + %16 = bitcast { %Array*, i64 }* %15 to %Tuple* + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, i64 }*, { i64, %Callable* }* }* getelementptr ({ { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %16, %Tuple* %17) + %first = bitcast %Tuple* %17 to { { %Array*, i64 }*, { i64, %Callable* }* }* + %18 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %first, i32 0, i32 0 + %19 = load { %Array*, i64 }*, { %Array*, i64 }** %18, align 8 + %20 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %19, i32 0, i32 0 + %21 = load %Array*, %Array** %20, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 1) + %22 = bitcast { %Array*, i64 }* %19 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 1) + %23 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %first, i32 0, 
i32 1 + %24 = load { i64, %Callable* }*, { i64, %Callable* }** %23, align 8 + %25 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 1 + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 1) + %27 = bitcast { i64, %Callable* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 1) + %28 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %29 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %30 = phi i64 [ 0, %then0__1 ], [ %38, %exiting__2 ] + %31 = icmp sle i64 %30, %12 + br i1 %31, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %30) + %33 = bitcast i8* %32 to { %Array*, i64 }** + %34 = load { %Array*, i64 }*, { %Array*, i64 }** %33, align 8 + %35 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %34, i32 0, i32 0 + %36 = load %Array*, %Array** %35, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %36, i32 -1) + %37 = bitcast { %Array*, i64 }* %34 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %37, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %38 = add i64 %30, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %11 + +header__3: ; preds = %exiting__3, %continue__1 + %39 = phi i64 [ 0, %continue__1 ], [ %51, %exiting__3 ] + %40 = icmp sle i64 %39, %29 + br i1 %40, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 %39) + %42 = bitcast i8* %41 to { { %Array*, i64 }*, { i64, %Callable* }* }** + store { { %Array*, i64 }*, { i64, %Callable* }* }* %first, { { %Array*, i64 }*, { i64, %Callable* }* }** %42, align 8 + %43 = load { %Array*, i64 }*, { %Array*, i64 }** %18, align 8 + %44 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %43, i32 0, i32 0 + %45 = load %Array*, %Array** %44, align 8 + %46 = load { i64, %Callable* }*, { i64, %Callable* }** %23, align 8 + %47 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %46, i32 0, i32 1 + %48 = load %Callable*, %Callable** %47, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %45, i32 1) + %49 = bitcast { %Array*, i64 }* %43 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %49, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %48, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %48, i32 1) + %50 = bitcast { i64, %Callable* }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %50, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %51 = add i64 %39, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %28, %Array** %retval, align 8 + %52 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %53 = phi i64 [ 0, %exit__3 ], [ %69, %exiting__4 ] + %54 = icmp sle i64 %53, %52 + br i1 %54, label %body__4, label %exit__4 + +body__4: ; preds = 
%header__4 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 %53) + %56 = bitcast i8* %55 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %57 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %56, align 8 + %58 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %57, i32 0, i32 0 + %59 = load { %Array*, i64 }*, { %Array*, i64 }** %58, align 8 + %60 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %59, i32 0, i32 0 + %61 = load %Array*, %Array** %60, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %61, i32 1) + %62 = bitcast { %Array*, i64 }* %59 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %62, i32 1) + %63 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %57, i32 0, i32 1 + %64 = load { i64, %Callable* }*, { i64, %Callable* }** %63, align 8 + %65 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %64, i32 0, i32 1 + %66 = load %Callable*, %Callable** %65, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %66, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %66, i32 1) + %67 = bitcast { i64, %Callable* }* %64 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %67, i32 1) + %68 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %57 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %68, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %69 = add i64 %53, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %70 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idx = phi i64 [ 1, %exit__4 ], [ %104, %exiting__5 ] + %71 = icmp sle i64 %idx, %70 + br i1 %71, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %72 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 -1) + %73 = call %Array* @__quantum__rt__array_copy(%Array* %72, i1 false) + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %75 = bitcast i8* %74 to { %Array*, i64 }** + %76 = load { %Array*, i64 }*, { %Array*, i64 }** %75, align 8 + %77 = bitcast { %Array*, i64 }* %76 to %Tuple* + %78 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, i64 }*, { i64, %Callable* }* }* getelementptr ({ { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %77, %Tuple* %78) + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 %idx) + %80 = bitcast i8* %79 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %81 = bitcast %Tuple* %78 to { { %Array*, i64 }*, { i64, %Callable* }* }* + %82 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %81, i32 0, i32 0 + %83 = load { %Array*, i64 }*, { %Array*, i64 }** %82, align 8 + %84 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %83, i32 0, i32 0 + %85 = load %Array*, %Array** %84, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %85, i32 1) + %86 = bitcast { %Array*, i64 }* %83 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 1) + %87 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %81, i32 0, i32 1 + %88 = load { i64, %Callable* }*, { i64, %Callable* }** %87, align 8 + %89 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %88, i32 0, i32 1 + %90 = load %Callable*, %Callable** %89, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %90, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %90, i32 1) + %91 = bitcast { i64, %Callable* }* %88 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %91, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 1) + %92 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %80, align 8 + %93 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %92, i32 0, i32 0 + %94 = load { %Array*, i64 }*, { %Array*, i64 }** %93, align 8 + %95 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %94, i32 0, i32 0 + %96 = load %Array*, %Array** %95, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %96, i32 -1) + %97 = bitcast { %Array*, i64 }* %94 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %97, i32 -1) + %98 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %92, i32 0, i32 1 + %99 = load { i64, %Callable* }*, { i64, %Callable* }** %98, align 8 + %100 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %99, i32 0, i32 1 + %101 = load %Callable*, %Callable** %100, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %101, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %101, i32 -1) + %102 = bitcast { i64, %Callable* }* %99 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %102, i32 -1) + %103 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %92 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %103, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %96, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %97, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %101, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %101, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %102, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %103, i32 -1) + store { { %Array*, i64 }*, { i64, %Callable* }* }* %81, { { %Array*, i64 }*, { i64, %Callable* }* }** %80, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %73, i32 1) + store %Array* %73, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %72, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %104 = add i64 %idx, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %105 = load %Array*, %Array** %retval, align 8 + %106 = load { %Array*, i64 }*, { %Array*, i64 }** %18, align 8 + %107 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %106, i32 0, i32 0 + %108 = load %Array*, %Array** %107, align 8 + %109 = load { i64, %Callable* }*, { i64, %Callable* }** %23, align 8 + %110 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %109, i32 0, i32 1 + 
%111 = load %Callable*, %Callable** %110, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %112 = sub i64 %length, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %113 = phi i64 [ 0, %exit__5 ], [ %121, %exiting__6 ] + %114 = icmp sle i64 %113, %112 + br i1 %114, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %113) + %116 = bitcast i8* %115 to { %Array*, i64 }** + %117 = load { %Array*, i64 }*, { %Array*, i64 }** %116, align 8 + %118 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %117, i32 0, i32 0 + %119 = load %Array*, %Array** %118, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %119, i32 -1) + %120 = bitcast { %Array*, i64 }* %117 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %120, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %121 = add i64 %113, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %108, i32 -1) + %122 = bitcast { %Array*, i64 }* %106 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %122, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %111, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %111, i32 -1) + %123 = bitcast { i64, %Callable* }* %109 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %123, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 -1) + %124 = call i64 @__quantum__rt__array_get_size_1d(%Array* %105) + %125 = sub i64 %124, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %126 = phi i64 [ 0, %exit__6 ], [ %142, %exiting__7 ] + %127 = icmp sle i64 %126, %125 + br i1 %127, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %128 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 %126) + %129 = bitcast i8* %128 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %130 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %129, align 8 + %131 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %130, i32 0, i32 0 + %132 = load { %Array*, i64 }*, { %Array*, i64 }** %131, align 8 + %133 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %132, i32 0, i32 0 + %134 = load %Array*, %Array** %133, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %134, i32 -1) + %135 = bitcast { %Array*, i64 }* %132 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %135, i32 -1) + %136 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %130, i32 0, i32 1 + %137 = load { i64, %Callable* }*, { i64, %Callable* }** %136, align 8 + %138 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %137, i32 0, i32 1 + %139 = load %Callable*, %Callable** %138, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %139, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %139, i32 -1) + %140 = bitcast { i64, %Callable* }* %137 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %140, i32 -1) + %141 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %130 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %141, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %142 = add i64 %126, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %105, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %108, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %122, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %111, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %111, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %123, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + ret %Array* %105 +} + +define internal void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, { %Array*, i64 }* }* getelementptr ({ double, i64, { %Array*, i64 }* }, { double, i64, { %Array*, i64 }* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { double, i64, { %Array*, i64 }* }* + %7 = getelementptr inbounds { double, i64, { %Array*, i64 }* }, { double, i64, { %Array*, i64 }* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { double, i64, { %Array*, i64 }* }, { double, i64, { %Array*, i64 }* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { double, i64, { %Array*, i64 }* }, { double, i64, { %Array*, i64 }* }* %6, i32 0, i32 2 + store double %2, double* %7, align 8 + store i64 %4, i64* %8, align 4 + %10 = bitcast %Tuple* %arg-tuple to { %Array*, i64 }* + store { %Array*, i64 }* %10, { %Array*, i64 }** %9, align 8 + %11 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %12 = load %Callable*, %Callable** %11, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %5, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__EncodeSample____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64, { %Array*, i64 }* }* + %1 = getelementptr inbounds { double, i64, { %Array*, i64 }* }, { double, i64, { %Array*, i64 }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, i64, { %Array*, i64 }* }, { double, i64, { %Array*, i64 }* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, i64, { %Array*, i64 }* }, { double, i64, { %Array*, i64 }* }* %0, i32 0, i32 2 + %4 = load double, double* %1, align 8 + %5 = load i64, i64* %2, align 4 + %6 = load { %Array*, i64 }*, { %Array*, i64 }** %3, align 8 + %7 = call { { %Array*, i64 }*, { i64, %Callable* }* }* @Microsoft__Quantum__MachineLearning____QsRef0__EncodeSample____body(double %4, i64 %5, { %Array*, i64 }* %6) + %8 = bitcast %Tuple* %result-tuple to { { 
%Array*, i64 }*, { i64, %Callable* }* }* + %9 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %7, i32 0, i32 0 + %12 = load { %Array*, i64 }*, { %Array*, i64 }** %11, align 8 + store { %Array*, i64 }* %12, { %Array*, i64 }** %9, align 8 + %13 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %7, i32 0, i32 1 + %14 = load { i64, %Callable* }*, { i64, %Callable* }** %13, align 8 + store { i64, %Callable* }* %14, { i64, %Callable* }** %10, align 8 + %15 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %7 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + ret void +} + +define internal void @MemoryManagement__2__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__2__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +declare %String* @__quantum__rt__int_to_string(i64) + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) + +define internal { i64, { %Array*, %Array*, double }* }* @Microsoft__Quantum__MachineLearning____QsRef0__RunSingleTrainingEpoch____body(%Array* %encodedSamples, { %Array* }* %schedule, i64 %periodScore, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, { %Array*, %Array*, double }* %model, i64 %nPreviousBestMisses) { +entry: + %bestSoFar = alloca { %Array*, %Array*, double }*, align 8 + %nBestMisses = alloca i64, align 8 + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %encodedSamples) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %encodedSamples, i64 %2) + %5 = bitcast i8* %4 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %6 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %5, align 8 + %7 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { 
i64, %Callable* }* }* %6, i32 0, i32 0 + %8 = load { %Array*, i64 }*, { %Array*, i64 }** %7, align 8 + %9 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %8, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array*, i64 }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %6, i32 0, i32 1 + %13 = load { i64, %Callable* }*, { i64, %Callable* }** %12, align 8 + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1 + %15 = load %Callable*, %Callable** %14, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %15, i32 1) + %16 = bitcast { i64, %Callable* }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + %17 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %encodedSamples, i32 1) + %19 = getelementptr inbounds { %Array* }, { %Array* }* %schedule, i32 0, i32 0 + %20 = load %Array*, %Array** %19, align 8 + %21 = call i64 @__quantum__rt__array_get_size_1d(%Array* %20) + %22 = sub i64 %21, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %28, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %20, i64 %23) + %26 = bitcast i8* %25 to %Range* + %27 = load %Range, %Range* %26, align 4 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %28 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 1) + %29 = bitcast { %Array* }* %schedule to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + %30 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 8 + %31 = load %Callable*, %Callable** %30, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %31, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %31, i32 1) + %32 = bitcast { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %32, i32 1) + %33 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 0 + %34 = load %Array*, %Array** %33, align 8 + %35 = call i64 @__quantum__rt__array_get_size_1d(%Array* %34) + %36 = sub i64 %35, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %37 = phi i64 [ 0, %exit__2 ], [ %48, %exiting__3 ] + %38 = icmp sle i64 %37, %36 + br i1 %38, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 %37) + %40 = bitcast i8* %39 to { { i64, %Array* }*, i2, i64 }** + %41 = load { { i64, %Array* 
}*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %40, align 8 + %42 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %41, i32 0, i32 0 + %43 = load { i64, %Array* }*, { i64, %Array* }** %42, align 8 + %44 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %43, i32 0, i32 1 + %45 = load %Array*, %Array** %44, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %45, i32 1) + %46 = bitcast { i64, %Array* }* %43 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %46, i32 1) + %47 = bitcast { { i64, %Array* }*, i2, i64 }* %41 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %37, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 1) + %49 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 1 + %50 = load %Array*, %Array** %49, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 1) + %51 = bitcast { %Array*, %Array*, double }* %model to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %51, i32 1) + store i64 %nPreviousBestMisses, i64* %nBestMisses, align 4 + store { %Array*, %Array*, double }* %model, { %Array*, %Array*, double }** %bestSoFar, align 8 + %52 = sub i64 %35, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %53 = phi i64 [ 0, %exit__3 ], [ %64, %exiting__4 ] + %54 = icmp sle i64 %53, %52 + br i1 %54, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 %53) + %56 = bitcast i8* %55 to { { i64, %Array* }*, i2, i64 }** + %57 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %56, align 8 + %58 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %57, i32 0, i32 0 + %59 = load { i64, %Array* }*, { i64, %Array* }** %58, align 8 + %60 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %59, i32 0, i32 1 + %61 = load %Array*, %Array** %60, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %61, i32 1) + %62 = bitcast { i64, %Array* }* %59 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %62, i32 1) + %63 = bitcast { { i64, %Array* }*, i2, i64 }* %57 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %63, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %64 = add i64 %53, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %51, i32 1) + %65 = sub i64 %35, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %66 = phi i64 [ 0, %exit__4 ], [ %77, %exiting__5 ] + %67 = icmp sle i64 %66, %65 + br i1 %67, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %68 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 %66) + %69 = bitcast i8* %68 to { { i64, %Array* }*, i2, i64 }** + %70 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %69, align 8 + %71 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %70, i32 0, i32 0 + %72 = load { i64, %Array* }*, { i64, %Array* }** 
%71, align 8 + %73 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %72, i32 0, i32 1 + %74 = load %Array*, %Array** %73, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %74, i32 1) + %75 = bitcast { i64, %Array* }* %72 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %75, i32 1) + %76 = bitcast { { i64, %Array* }*, i2, i64 }* %70 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %76, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %77 = add i64 %66, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %50, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %51, i32 1) + %78 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___1f5badf5e91544c8bbff3b59164a3bb0_Fst__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %samples = call %Array* @Microsoft__Quantum__Arrays___9021424cc3274213b24cdef7f22a1dcc_Mapped__body(%Callable* %78, %Array* %encodedSamples) + %79 = call i64 @__quantum__rt__array_get_size_1d(%Array* %samples) + %80 = sub i64 %79, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %81 = phi i64 [ 0, %exit__5 ], [ %89, %exiting__6 ] + %82 = icmp sle i64 %81, %80 + br i1 %82, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %81) + %84 = bitcast i8* %83 to { %Array*, i64 }** + %85 = load { %Array*, i64 }*, { %Array*, i64 }** %84, align 8 + %86 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %85, i32 0, i32 0 + %87 = load %Array*, %Array** %86, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %87, i32 1) + %88 = bitcast { %Array*, i64 }* %85 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %88, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %89 = add i64 %81, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 1) + %90 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___e2a028c390684ab28246f52a0c3fbae9_Snd__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %stateGenerators = call %Array* @Microsoft__Quantum__Arrays___01f2a4172bae4fb9a526206ae21b7f8f_Mapped__body(%Callable* %90, %Array* %encodedSamples) + %91 = call i64 @__quantum__rt__array_get_size_1d(%Array* %stateGenerators) + %92 = sub i64 %91, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %93 = phi i64 [ 0, %exit__6 ], [ %101, %exiting__7 ] + %94 = icmp sle i64 %93, %92 + br i1 %94, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %95 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %stateGenerators, i64 %93) + %96 = bitcast i8* %95 to { i64, %Callable* }** + %97 = load { i64, %Callable* }*, { i64, %Callable* }** %96, align 8 + %98 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %97, i32 0, i32 1 + %99 = load %Callable*, %Callable** %98, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %99, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %99, i32 1) + %100 = bitcast { i64, %Callable* }* %97 to %Tuple* + call 
void @__quantum__rt__tuple_update_alias_count(%Tuple* %100, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %101 = add i64 %93, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %stateGenerators, i32 1) + %102 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning___Features__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %features = call %Array* @Microsoft__Quantum__Arrays___458019d5b77947a88477997a20fc14c5_Mapped__body(%Callable* %102, %Array* %samples) + %103 = call i64 @__quantum__rt__array_get_size_1d(%Array* %features) + %104 = sub i64 %103, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %105 = phi i64 [ 0, %exit__7 ], [ %110, %exiting__8 ] + %106 = icmp sle i64 %105, %104 + br i1 %106, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %107 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %features, i64 %105) + %108 = bitcast i8* %107 to %Array** + %109 = load %Array*, %Array** %108, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %109, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %110 = add i64 %105, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %features, i32 1) + %111 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning___Label__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %actualLabels = call %Array* @Microsoft__Quantum__Arrays___f34491685bf044f1939458f941be92ef_Mapped__body(%Callable* %111, %Array* %samples) + call void @__quantum__rt__array_update_alias_count(%Array* %actualLabels, i32 1) + %112 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 2 + %113 = load double, double* %112, align 8 + %114 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 1 + %115 = load double, double* %114, align 8 + %116 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 3 + %117 = load i64, i64* %116, align 4 + %118 = call %Array* @Microsoft__Quantum__MachineLearning__EstimateClassificationProbabilities__body(double %115, { %Array*, %Array*, double }* %model, %Array* %features, i64 %117) + %inferredLabels = call %Array* @Microsoft__Quantum__MachineLearning__InferredLabels__body(double %113, %Array* %118) + call void @__quantum__rt__array_update_alias_count(%Array* %inferredLabels, i32 1) + %119 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Arrays___8a3dda3255e547b68a0799da4c61f944_Subarray__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %120 = sub i64 %0, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %121 = phi i64 [ 0, %exit__8 ], [ %137, %exiting__9 ] + %122 = icmp sle i64 %121, %120 + br i1 %122, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %123 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %encodedSamples, i64 %121) + %124 = bitcast i8* %123 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %125 = load { { %Array*, i64 }*, { 
i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %124, align 8 + %126 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %125, i32 0, i32 0 + %127 = load { %Array*, i64 }*, { %Array*, i64 }** %126, align 8 + %128 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %127, i32 0, i32 0 + %129 = load %Array*, %Array** %128, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %129, i32 1) + %130 = bitcast { %Array*, i64 }* %127 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %130, i32 1) + %131 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %125, i32 0, i32 1 + %132 = load { i64, %Callable* }*, { i64, %Callable* }** %131, align 8 + %133 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %132, i32 0, i32 1 + %134 = load %Callable*, %Callable** %133, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %134, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %134, i32 1) + %135 = bitcast { i64, %Callable* }* %132 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %135, i32 1) + %136 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %125 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %136, i32 1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %137 = add i64 %121, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_reference_count(%Array* %encodedSamples, i32 1) + %138 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %139 = bitcast %Tuple* %138 to { %Callable*, %Array* }* + %140 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %139, i32 0, i32 0 + %141 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %139, i32 0, i32 1 + store %Callable* %119, %Callable** %140, align 8 + store %Array* %encodedSamples, %Array** %141, align 8 + %142 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__7__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__6__FunctionTable, %Tuple* %138) + %143 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 2 + %144 = load i64, i64* %143, align 4 + %145 = call %Array* @Microsoft__Quantum__MachineLearning__Misclassifications__body(%Array* %inferredLabels, %Array* %actualLabels) + %146 = call %Array* @Microsoft__Quantum__Arrays___924d100a2fcc4f628511e045bf39e089_Chunks__body(i64 %144, %Array* %145) + %minibatches = call %Array* @Microsoft__Quantum__Arrays___f496bf5745d24576bdaad9599407fb79_Mapped__body(%Callable* %142, %Array* %146) + %147 = call i64 @__quantum__rt__array_get_size_1d(%Array* %minibatches) + %148 = sub i64 %147, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %149 = phi i64 [ 0, %exit__9 ], [ %156, %exiting__10 ] + %150 = icmp sle i64 %149, %148 + br i1 %150, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %151 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %minibatches, i64 %149) + %152 = bitcast i8* %151 to %Array** + %153 = load %Array*, %Array** %152, 
align 8 + %154 = call i64 @__quantum__rt__array_get_size_1d(%Array* %153) + %155 = sub i64 %154, 1 + br label %header__11 + +exiting__10: ; preds = %exit__11 + %156 = add i64 %149, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %minibatches, i32 1) + %157 = call %Array* @Microsoft__Quantum__Arrays___bfbc686941de40cda88afed8ead5a62b_Enumerated__body(%Array* %minibatches) + %158 = call i64 @__quantum__rt__array_get_size_1d(%Array* %157) + %159 = sub i64 %158, 1 + br label %header__12 + +header__11: ; preds = %exiting__11, %body__10 + %160 = phi i64 [ 0, %body__10 ], [ %176, %exiting__11 ] + %161 = icmp sle i64 %160, %155 + br i1 %161, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %162 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %153, i64 %160) + %163 = bitcast i8* %162 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %164 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %163, align 8 + %165 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %164, i32 0, i32 0 + %166 = load { %Array*, i64 }*, { %Array*, i64 }** %165, align 8 + %167 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %166, i32 0, i32 0 + %168 = load %Array*, %Array** %167, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %168, i32 1) + %169 = bitcast { %Array*, i64 }* %166 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %169, i32 1) + %170 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %164, i32 0, i32 1 + %171 = load { i64, %Callable* }*, { i64, %Callable* }** %170, align 8 + %172 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %171, i32 0, i32 1 + %173 = load %Callable*, %Callable** %172, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %173, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %173, i32 1) + %174 = bitcast { i64, %Callable* }* %171 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %174, i32 1) + %175 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %164 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %175, i32 1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %176 = add i64 %160, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %153, i32 1) + br label %exiting__10 + +header__12: ; preds = %exiting__12, %exit__10 + %177 = phi i64 [ 0, %exit__10 ], [ %186, %exiting__12 ] + %178 = icmp sle i64 %177, %159 + br i1 %178, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %179 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %157, i64 %177) + %180 = bitcast i8* %179 to { i64, %Array* }** + %181 = load { i64, %Array* }*, { i64, %Array* }** %180, align 8 + %182 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %181, i32 0, i32 0 + %idxMinibatch = load i64, i64* %182, align 4 + %183 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %181, i32 0, i32 1 + %minibatch = load %Array*, %Array** %183, align 8 + %184 = call i64 @__quantum__rt__array_get_size_1d(%Array* %minibatch) + %185 = sub i64 %184, 1 + br label %header__13 + +exiting__12: ; preds = %exit__22 + %186 = add i64 %177, 1 + br label %header__12 + +exit__12: ; preds 
= %header__12 + %187 = load i64, i64* %nBestMisses, align 4 + %188 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %bestSoFar, align 8 + %189 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %188, i32 0, i32 0 + %190 = load %Array*, %Array** %189, align 8 + %191 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %188, i32 0, i32 1 + %192 = load %Array*, %Array** %191, align 8 + %193 = call i64 @__quantum__rt__array_get_size_1d(%Array* %190) + %194 = sub i64 %193, 1 + br label %header__23 + +header__13: ; preds = %exiting__13, %body__12 + %195 = phi i64 [ 0, %body__12 ], [ %211, %exiting__13 ] + %196 = icmp sle i64 %195, %185 + br i1 %196, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %197 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %minibatch, i64 %195) + %198 = bitcast i8* %197 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %199 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %198, align 8 + %200 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %199, i32 0, i32 0 + %201 = load { %Array*, i64 }*, { %Array*, i64 }** %200, align 8 + %202 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %201, i32 0, i32 0 + %203 = load %Array*, %Array** %202, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %203, i32 1) + %204 = bitcast { %Array*, i64 }* %201 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %204, i32 1) + %205 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %199, i32 0, i32 1 + %206 = load { i64, %Callable* }*, { i64, %Callable* }** %205, align 8 + %207 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %206, i32 0, i32 1 + %208 = load %Callable*, %Callable** %207, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %208, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %208, i32 1) + %209 = bitcast { i64, %Callable* }* %206 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %209, i32 1) + %210 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %199 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %210, i32 1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %211 = add i64 %195, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_alias_count(%Array* %minibatch, i32 1) + %212 = load %Callable*, %Callable** %30, align 8 + %213 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @4, i32 0, i32 0)) + %214 = call %String* @__quantum__rt__int_to_string(i64 %idxMinibatch) + %215 = call %String* @__quantum__rt__string_concatenate(%String* %213, %String* %214) + call void @__quantum__rt__string_update_reference_count(%String* %213, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %214, i32 -1) + %216 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @5, i32 0, i32 0)) + %217 = call %String* @__quantum__rt__string_concatenate(%String* %215, %String* %216) + call void @__quantum__rt__string_update_reference_count(%String* %215, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %216, i32 -1) + %218 = call %String* 
@__quantum__rt__int_to_string(i64 %147) + %219 = call %String* @__quantum__rt__string_concatenate(%String* %217, %String* %218) + call void @__quantum__rt__string_update_reference_count(%String* %217, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %218, i32 -1) + %220 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @3, i32 0, i32 0)) + %221 = call %String* @__quantum__rt__string_concatenate(%String* %219, %String* %220) + call void @__quantum__rt__string_update_reference_count(%String* %219, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %220, i32 -1) + %222 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %String* }* getelementptr ({ %String* }, { %String* }* null, i32 1) to i64)) + %223 = bitcast %Tuple* %222 to { %String* }* + %224 = getelementptr inbounds { %String* }, { %String* }* %223, i32 0, i32 0 + store %String* %221, %String** %224, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %212, %Tuple* %222, %Tuple* null) + %225 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %bestSoFar, align 8 + %226 = call { double, { %Array*, %Array*, double }* }* @Microsoft__Quantum__MachineLearning____QsRef0__RunSingleTrainingStep____body(%Array* %minibatch, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, { %Array*, %Array*, double }* %225) + %227 = getelementptr inbounds { double, { %Array*, %Array*, double }* }, { double, { %Array*, %Array*, double }* }* %226, i32 0, i32 0 + %utility = load double, double* %227, align 8 + %228 = getelementptr inbounds { double, { %Array*, %Array*, double }* }, { double, { %Array*, %Array*, double }* }* %226, i32 0, i32 1 + %updatedModel = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %228, align 8 + %229 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %updatedModel, i32 0, i32 0 + %230 = load %Array*, %Array** %229, align 8 + %231 = call i64 @__quantum__rt__array_get_size_1d(%Array* %230) + %232 = sub i64 %231, 1 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %233 = phi i64 [ 0, %exit__13 ], [ %244, %exiting__14 ] + %234 = icmp sle i64 %233, %232 + br i1 %234, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %235 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %230, i64 %233) + %236 = bitcast i8* %235 to { { i64, %Array* }*, i2, i64 }** + %237 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %236, align 8 + %238 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %237, i32 0, i32 0 + %239 = load { i64, %Array* }*, { i64, %Array* }** %238, align 8 + %240 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %239, i32 0, i32 1 + %241 = load %Array*, %Array** %240, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %241, i32 1) + %242 = bitcast { i64, %Array* }* %239 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %242, i32 1) + %243 = bitcast { { i64, %Array* }*, i2, i64 }* %237 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %243, i32 1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %244 = add i64 %233, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_alias_count(%Array* %230, i32 1) + %245 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* 
%updatedModel, i32 0, i32 1 + %246 = load %Array*, %Array** %245, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %246, i32 1) + %247 = bitcast { %Array*, %Array*, double }* %updatedModel to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %247, i32 1) + %248 = fcmp ogt double %utility, 0x3E7AD7F29ABCAF48 + br i1 %248, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__14 + %249 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([82 x i8], [82 x i8]* @6, i32 0, i32 0)) + %250 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %String* }* getelementptr ({ %String* }, { %String* }* null, i32 1) to i64)) + %251 = bitcast %Tuple* %250 to { %String* }* + %252 = getelementptr inbounds { %String* }, { %String* }* %251, i32 0, i32 0 + store %String* %249, %String** %252, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %212, %Tuple* %250, %Tuple* null) + %253 = load double, double* %114, align 8 + %254 = load i64, i64* %116, align 4 + %probabilities = call %Array* @Microsoft__Quantum__MachineLearning__EstimateClassificationProbabilities__body(double %253, { %Array*, %Array*, double }* %updatedModel, %Array* %features, i64 %254) + call void @__quantum__rt__array_update_alias_count(%Array* %probabilities, i32 1) + %255 = call %Array* @Microsoft__Quantum__Arrays___e71c6b9cbb804917a4d7cd04011f2188_Zipped__body(%Array* %probabilities, %Array* %actualLabels) + %256 = load double, double* %112, align 8 + %updatedBias = call double @Microsoft__Quantum__MachineLearning____QsRef0__UpdatedBias____body(%Array* %255, double %256, double %253) + %updatedLabels = call %Array* @Microsoft__Quantum__MachineLearning__InferredLabels__body(double %updatedBias, %Array* %probabilities) + call void @__quantum__rt__array_update_alias_count(%Array* %updatedLabels, i32 1) + %257 = call %Array* @Microsoft__Quantum__MachineLearning__Misclassifications__body(%Array* %updatedLabels, %Array* %actualLabels) + %nMisses = call i64 @__quantum__rt__array_get_size_1d(%Array* %257) + %258 = load i64, i64* %nBestMisses, align 4 + %259 = icmp slt i64 %nMisses, %258 + br i1 %259, label %then0__2, label %continue__2 + +then0__2: ; preds = %then0__1 + store i64 %nMisses, i64* %nBestMisses, align 4 + %260 = sub i64 %231, 1 + br label %header__15 + +continue__2: ; preds = %exit__18, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %probabilities, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %updatedLabels, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %249, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %250, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %probabilities, i32 -1) + %261 = call i64 @__quantum__rt__array_get_size_1d(%Array* %255) + %262 = sub i64 %261, 1 + br label %header__19 + +continue__1: ; preds = %exit__19, %exit__14 + %263 = sub i64 %184, 1 + br label %header__20 + +header__15: ; preds = %exiting__15, %then0__2 + %264 = phi i64 [ 0, %then0__2 ], [ %275, %exiting__15 ] + %265 = icmp sle i64 %264, %260 + br i1 %265, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %266 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %230, i64 %264) + %267 = bitcast i8* %266 to { { i64, %Array* }*, i2, i64 }** + %268 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %267, align 8 + %269 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* 
}*, i2, i64 }* %268, i32 0, i32 0 + %270 = load { i64, %Array* }*, { i64, %Array* }** %269, align 8 + %271 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %270, i32 0, i32 1 + %272 = load %Array*, %Array** %271, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %272, i32 1) + %273 = bitcast { i64, %Array* }* %270 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %273, i32 1) + %274 = bitcast { { i64, %Array* }*, i2, i64 }* %268 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %274, i32 1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %275 = add i64 %264, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_alias_count(%Array* %230, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %246, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %247, i32 1) + %276 = sub i64 %231, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %277 = phi i64 [ 0, %exit__15 ], [ %288, %exiting__16 ] + %278 = icmp sle i64 %277, %276 + br i1 %278, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %279 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %230, i64 %277) + %280 = bitcast i8* %279 to { { i64, %Array* }*, i2, i64 }** + %281 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %280, align 8 + %282 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %281, i32 0, i32 0 + %283 = load { i64, %Array* }*, { i64, %Array* }** %282, align 8 + %284 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %283, i32 0, i32 1 + %285 = load %Array*, %Array** %284, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %285, i32 1) + %286 = bitcast { i64, %Array* }* %283 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %286, i32 1) + %287 = bitcast { { i64, %Array* }*, i2, i64 }* %281 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %287, i32 1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %288 = add i64 %277, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_reference_count(%Array* %230, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %246, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %247, i32 1) + %289 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %225, i32 0, i32 0 + %290 = load %Array*, %Array** %289, align 8 + %291 = call i64 @__quantum__rt__array_get_size_1d(%Array* %290) + %292 = sub i64 %291, 1 + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %293 = phi i64 [ 0, %exit__16 ], [ %304, %exiting__17 ] + %294 = icmp sle i64 %293, %292 + br i1 %294, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %295 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %290, i64 %293) + %296 = bitcast i8* %295 to { { i64, %Array* }*, i2, i64 }** + %297 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %296, align 8 + %298 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %297, i32 0, i32 0 + %299 = load { i64, %Array* }*, { i64, %Array* }** %298, align 8 + %300 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %299, i32 0, i32 1 + %301 = load %Array*, %Array** %300, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %301, i32 -1) + %302 = bitcast { i64, %Array* }* %299 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %302, i32 -1) + %303 = bitcast { { i64, %Array* }*, i2, i64 }* %297 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %303, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %304 = add i64 %293, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %290, i32 -1) + %305 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %225, i32 0, i32 1 + %306 = load %Array*, %Array** %305, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %306, i32 -1) + %307 = bitcast { %Array*, %Array*, double }* %225 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %307, i32 -1) + %308 = sub i64 %291, 1 + br label %header__18 + +header__18: ; preds = %exiting__18, %exit__17 + %309 = phi i64 [ 0, %exit__17 ], [ %320, %exiting__18 ] + %310 = icmp sle i64 %309, %308 + br i1 %310, label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %311 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %290, i64 %309) + %312 = bitcast i8* %311 to { { i64, %Array* }*, i2, i64 }** + %313 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %312, align 8 + %314 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %313, i32 0, i32 0 + %315 = load { i64, %Array* }*, { i64, %Array* }** %314, align 8 + %316 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %315, i32 0, i32 1 + %317 = load %Array*, %Array** %316, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %317, i32 -1) + %318 = bitcast { i64, %Array* }* %315 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %318, i32 -1) + %319 = bitcast { { i64, %Array* }*, i2, i64 }* %313 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %319, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = %body__18 + %320 = add i64 %309, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_reference_count(%Array* %290, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %306, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %307, i32 -1) + store { %Array*, %Array*, double }* %updatedModel, { %Array*, %Array*, double }** %bestSoFar, align 8 + br label %continue__2 + +header__19: ; preds = %exiting__19, %continue__2 + %321 = phi i64 [ 0, %continue__2 ], [ %327, %exiting__19 ] + %322 = icmp sle i64 %321, %262 + br i1 %322, label %body__19, label %exit__19 + +body__19: ; preds = %header__19 + %323 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %255, i64 %321) + %324 = bitcast i8* %323 to { double, i64 }** + %325 = load { double, i64 }*, { double, i64 }** %324, align 8 + %326 = bitcast { double, i64 }* %325 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %326, i32 -1) + br label %exiting__19 + +exiting__19: ; preds = %body__19 + %327 = add i64 %321, 1 + br label %header__19 + +exit__19: ; preds = %header__19 + call void @__quantum__rt__array_update_reference_count(%Array* %255, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %updatedLabels, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %257, i32 -1) + br label 
%continue__1 + +header__20: ; preds = %exiting__20, %continue__1 + %328 = phi i64 [ 0, %continue__1 ], [ %344, %exiting__20 ] + %329 = icmp sle i64 %328, %263 + br i1 %329, label %body__20, label %exit__20 + +body__20: ; preds = %header__20 + %330 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %minibatch, i64 %328) + %331 = bitcast i8* %330 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %332 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %331, align 8 + %333 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %332, i32 0, i32 0 + %334 = load { %Array*, i64 }*, { %Array*, i64 }** %333, align 8 + %335 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %334, i32 0, i32 0 + %336 = load %Array*, %Array** %335, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %336, i32 -1) + %337 = bitcast { %Array*, i64 }* %334 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %337, i32 -1) + %338 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %332, i32 0, i32 1 + %339 = load { i64, %Callable* }*, { i64, %Callable* }** %338, align 8 + %340 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %339, i32 0, i32 1 + %341 = load %Callable*, %Callable** %340, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %341, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %341, i32 -1) + %342 = bitcast { i64, %Callable* }* %339 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %342, i32 -1) + %343 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %332 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %343, i32 -1) + br label %exiting__20 + +exiting__20: ; preds = %body__20 + %344 = add i64 %328, 1 + br label %header__20 + +exit__20: ; preds = %header__20 + call void @__quantum__rt__array_update_alias_count(%Array* %minibatch, i32 -1) + %345 = sub i64 %231, 1 + br label %header__21 + +header__21: ; preds = %exiting__21, %exit__20 + %346 = phi i64 [ 0, %exit__20 ], [ %357, %exiting__21 ] + %347 = icmp sle i64 %346, %345 + br i1 %347, label %body__21, label %exit__21 + +body__21: ; preds = %header__21 + %348 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %230, i64 %346) + %349 = bitcast i8* %348 to { { i64, %Array* }*, i2, i64 }** + %350 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %349, align 8 + %351 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %350, i32 0, i32 0 + %352 = load { i64, %Array* }*, { i64, %Array* }** %351, align 8 + %353 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %352, i32 0, i32 1 + %354 = load %Array*, %Array** %353, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %354, i32 -1) + %355 = bitcast { i64, %Array* }* %352 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %355, i32 -1) + %356 = bitcast { { i64, %Array* }*, i2, i64 }* %350 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %356, i32 -1) + br label %exiting__21 + +exiting__21: ; preds = %body__21 + %357 = add i64 %346, 1 + br label %header__21 + +exit__21: ; preds = %header__21 + call void @__quantum__rt__array_update_alias_count(%Array* %230, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %246, i32 -1) + call 
void @__quantum__rt__tuple_update_alias_count(%Tuple* %247, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %221, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %222, i32 -1) + %358 = sub i64 %231, 1 + br label %header__22 + +header__22: ; preds = %exiting__22, %exit__21 + %359 = phi i64 [ 0, %exit__21 ], [ %370, %exiting__22 ] + %360 = icmp sle i64 %359, %358 + br i1 %360, label %body__22, label %exit__22 + +body__22: ; preds = %header__22 + %361 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %230, i64 %359) + %362 = bitcast i8* %361 to { { i64, %Array* }*, i2, i64 }** + %363 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %362, align 8 + %364 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %363, i32 0, i32 0 + %365 = load { i64, %Array* }*, { i64, %Array* }** %364, align 8 + %366 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %365, i32 0, i32 1 + %367 = load %Array*, %Array** %366, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %367, i32 -1) + %368 = bitcast { i64, %Array* }* %365 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %368, i32 -1) + %369 = bitcast { { i64, %Array* }*, i2, i64 }* %363 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %369, i32 -1) + br label %exiting__22 + +exiting__22: ; preds = %body__22 + %370 = add i64 %359, 1 + br label %header__22 + +exit__22: ; preds = %header__22 + call void @__quantum__rt__array_update_reference_count(%Array* %230, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %246, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %247, i32 -1) + %371 = bitcast { double, { %Array*, %Array*, double }* }* %226 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %371, i32 -1) + br label %exiting__12 + +header__23: ; preds = %exiting__23, %exit__12 + %372 = phi i64 [ 0, %exit__12 ], [ %383, %exiting__23 ] + %373 = icmp sle i64 %372, %194 + br i1 %373, label %body__23, label %exit__23 + +body__23: ; preds = %header__23 + %374 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %190, i64 %372) + %375 = bitcast i8* %374 to { { i64, %Array* }*, i2, i64 }** + %376 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %375, align 8 + %377 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %376, i32 0, i32 0 + %378 = load { i64, %Array* }*, { i64, %Array* }** %377, align 8 + %379 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %378, i32 0, i32 1 + %380 = load %Array*, %Array** %379, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %380, i32 1) + %381 = bitcast { i64, %Array* }* %378 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %381, i32 1) + %382 = bitcast { { i64, %Array* }*, i2, i64 }* %376 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %382, i32 1) + br label %exiting__23 + +exiting__23: ; preds = %body__23 + %383 = add i64 %372, 1 + br label %header__23 + +exit__23: ; preds = %header__23 + call void @__quantum__rt__array_update_reference_count(%Array* %190, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %192, i32 1) + %384 = bitcast { %Array*, %Array*, double }* %188 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %384, i32 1) + %385 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { %Array*, %Array*, double }* }* getelementptr ({ i64, { %Array*, %Array*, double }* }, { i64, { %Array*, %Array*, double }* }* null, i32 1) to i64)) + %386 = bitcast %Tuple* %385 to { i64, { %Array*, %Array*, double }* }* + %387 = getelementptr inbounds { i64, { %Array*, %Array*, double }* }, { i64, { %Array*, %Array*, double }* }* %386, i32 0, i32 0 + %388 = getelementptr inbounds { i64, { %Array*, %Array*, double }* }, { i64, { %Array*, %Array*, double }* }* %386, i32 0, i32 1 + store i64 %187, i64* %387, align 4 + store { %Array*, %Array*, double }* %188, { %Array*, %Array*, double }** %388, align 8 + %389 = sub i64 %0, 1 + br label %header__24 + +header__24: ; preds = %exiting__24, %exit__23 + %390 = phi i64 [ 0, %exit__23 ], [ %406, %exiting__24 ] + %391 = icmp sle i64 %390, %389 + br i1 %391, label %body__24, label %exit__24 + +body__24: ; preds = %header__24 + %392 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %encodedSamples, i64 %390) + %393 = bitcast i8* %392 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %394 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %393, align 8 + %395 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %394, i32 0, i32 0 + %396 = load { %Array*, i64 }*, { %Array*, i64 }** %395, align 8 + %397 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %396, i32 0, i32 0 + %398 = load %Array*, %Array** %397, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %398, i32 -1) + %399 = bitcast { %Array*, i64 }* %396 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %399, i32 -1) + %400 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %394, i32 0, i32 1 + %401 = load { i64, %Callable* }*, { i64, %Callable* }** %400, align 8 + %402 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %401, i32 0, i32 1 + %403 = load %Callable*, %Callable** %402, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %403, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %403, i32 -1) + %404 = bitcast { i64, %Callable* }* %401 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %404, i32 -1) + %405 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %394 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %405, i32 -1) + br label %exiting__24 + +exiting__24: ; preds = %body__24 + %406 = add i64 %390, 1 + br label %header__24 + +exit__24: ; preds = %header__24 + call void @__quantum__rt__array_update_alias_count(%Array* %encodedSamples, i32 -1) + %407 = sub i64 %21, 1 + br label %header__25 + +header__25: ; preds = %exiting__25, %exit__24 + %408 = phi i64 [ 0, %exit__24 ], [ %413, %exiting__25 ] + %409 = icmp sle i64 %408, %407 + br i1 %409, label %body__25, label %exit__25 + +body__25: ; preds = %header__25 + %410 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %20, i64 %408) + %411 = bitcast i8* %410 to %Range* + %412 = load %Range, %Range* %411, align 4 + br label %exiting__25 + +exiting__25: ; preds = %body__25 + %413 = add i64 %408, 1 + br label %header__25 + +exit__25: ; preds = %header__25 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + %414 = load %Callable*, %Callable** %30, 
align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %414, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %414, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %32, i32 -1) + %415 = sub i64 %35, 1 + br label %header__26 + +header__26: ; preds = %exiting__26, %exit__25 + %416 = phi i64 [ 0, %exit__25 ], [ %427, %exiting__26 ] + %417 = icmp sle i64 %416, %415 + br i1 %417, label %body__26, label %exit__26 + +body__26: ; preds = %header__26 + %418 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 %416) + %419 = bitcast i8* %418 to { { i64, %Array* }*, i2, i64 }** + %420 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %419, align 8 + %421 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %420, i32 0, i32 0 + %422 = load { i64, %Array* }*, { i64, %Array* }** %421, align 8 + %423 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %422, i32 0, i32 1 + %424 = load %Array*, %Array** %423, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %424, i32 -1) + %425 = bitcast { i64, %Array* }* %422 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %425, i32 -1) + %426 = bitcast { { i64, %Array* }*, i2, i64 }* %420 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %426, i32 -1) + br label %exiting__26 + +exiting__26: ; preds = %body__26 + %427 = add i64 %416, 1 + br label %header__26 + +exit__26: ; preds = %header__26 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %51, i32 -1) + %428 = sub i64 %193, 1 + br label %header__27 + +header__27: ; preds = %exiting__27, %exit__26 + %429 = phi i64 [ 0, %exit__26 ], [ %440, %exiting__27 ] + %430 = icmp sle i64 %429, %428 + br i1 %430, label %body__27, label %exit__27 + +body__27: ; preds = %header__27 + %431 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %190, i64 %429) + %432 = bitcast i8* %431 to { { i64, %Array* }*, i2, i64 }** + %433 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %432, align 8 + %434 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %433, i32 0, i32 0 + %435 = load { i64, %Array* }*, { i64, %Array* }** %434, align 8 + %436 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %435, i32 0, i32 1 + %437 = load %Array*, %Array** %436, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %437, i32 -1) + %438 = bitcast { i64, %Array* }* %435 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %438, i32 -1) + %439 = bitcast { { i64, %Array* }*, i2, i64 }* %433 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %439, i32 -1) + br label %exiting__27 + +exiting__27: ; preds = %body__27 + %440 = add i64 %429, 1 + br label %header__27 + +exit__27: ; preds = %header__27 + call void @__quantum__rt__array_update_alias_count(%Array* %190, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %192, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %384, i32 -1) + %441 = sub i64 %79, 1 + br label %header__28 + +header__28: ; preds = %exiting__28, %exit__27 + %442 = phi i64 [ 0, %exit__27 ], [ %450, %exiting__28 ] + %443 = icmp sle i64 %442, %441 + br i1 %443, label %body__28, label %exit__28 + +body__28: ; 
preds = %header__28 + %444 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %442) + %445 = bitcast i8* %444 to { %Array*, i64 }** + %446 = load { %Array*, i64 }*, { %Array*, i64 }** %445, align 8 + %447 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %446, i32 0, i32 0 + %448 = load %Array*, %Array** %447, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %448, i32 -1) + %449 = bitcast { %Array*, i64 }* %446 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %449, i32 -1) + br label %exiting__28 + +exiting__28: ; preds = %body__28 + %450 = add i64 %442, 1 + br label %header__28 + +exit__28: ; preds = %header__28 + call void @__quantum__rt__array_update_alias_count(%Array* %samples, i32 -1) + %451 = sub i64 %91, 1 + br label %header__29 + +header__29: ; preds = %exiting__29, %exit__28 + %452 = phi i64 [ 0, %exit__28 ], [ %460, %exiting__29 ] + %453 = icmp sle i64 %452, %451 + br i1 %453, label %body__29, label %exit__29 + +body__29: ; preds = %header__29 + %454 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %stateGenerators, i64 %452) + %455 = bitcast i8* %454 to { i64, %Callable* }** + %456 = load { i64, %Callable* }*, { i64, %Callable* }** %455, align 8 + %457 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %456, i32 0, i32 1 + %458 = load %Callable*, %Callable** %457, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %458, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %458, i32 -1) + %459 = bitcast { i64, %Callable* }* %456 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %459, i32 -1) + br label %exiting__29 + +exiting__29: ; preds = %body__29 + %460 = add i64 %452, 1 + br label %header__29 + +exit__29: ; preds = %header__29 + call void @__quantum__rt__array_update_alias_count(%Array* %stateGenerators, i32 -1) + %461 = sub i64 %103, 1 + br label %header__30 + +header__30: ; preds = %exiting__30, %exit__29 + %462 = phi i64 [ 0, %exit__29 ], [ %467, %exiting__30 ] + %463 = icmp sle i64 %462, %461 + br i1 %463, label %body__30, label %exit__30 + +body__30: ; preds = %header__30 + %464 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %features, i64 %462) + %465 = bitcast i8* %464 to %Array** + %466 = load %Array*, %Array** %465, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %466, i32 -1) + br label %exiting__30 + +exiting__30: ; preds = %body__30 + %467 = add i64 %462, 1 + br label %header__30 + +exit__30: ; preds = %header__30 + call void @__quantum__rt__array_update_alias_count(%Array* %features, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %actualLabels, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %inferredLabels, i32 -1) + %468 = sub i64 %147, 1 + br label %header__31 + +header__31: ; preds = %exiting__31, %exit__30 + %469 = phi i64 [ 0, %exit__30 ], [ %476, %exiting__31 ] + %470 = icmp sle i64 %469, %468 + br i1 %470, label %body__31, label %exit__31 + +body__31: ; preds = %header__31 + %471 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %minibatches, i64 %469) + %472 = bitcast i8* %471 to %Array** + %473 = load %Array*, %Array** %472, align 8 + %474 = call i64 @__quantum__rt__array_get_size_1d(%Array* %473) + %475 = sub i64 %474, 1 + br label %header__32 + +exiting__31: ; preds = %exit__32 + %476 = add i64 %469, 1 + br label %header__31 + +exit__31: ; preds = %header__31 + call void 
@__quantum__rt__array_update_alias_count(%Array* %minibatches, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %78, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %78, i32 -1) + %477 = sub i64 %79, 1 + br label %header__33 + +header__32: ; preds = %exiting__32, %body__31 + %478 = phi i64 [ 0, %body__31 ], [ %494, %exiting__32 ] + %479 = icmp sle i64 %478, %475 + br i1 %479, label %body__32, label %exit__32 + +body__32: ; preds = %header__32 + %480 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %473, i64 %478) + %481 = bitcast i8* %480 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %482 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %481, align 8 + %483 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %482, i32 0, i32 0 + %484 = load { %Array*, i64 }*, { %Array*, i64 }** %483, align 8 + %485 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %484, i32 0, i32 0 + %486 = load %Array*, %Array** %485, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %486, i32 -1) + %487 = bitcast { %Array*, i64 }* %484 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %487, i32 -1) + %488 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %482, i32 0, i32 1 + %489 = load { i64, %Callable* }*, { i64, %Callable* }** %488, align 8 + %490 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %489, i32 0, i32 1 + %491 = load %Callable*, %Callable** %490, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %491, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %491, i32 -1) + %492 = bitcast { i64, %Callable* }* %489 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %492, i32 -1) + %493 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %482 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %493, i32 -1) + br label %exiting__32 + +exiting__32: ; preds = %body__32 + %494 = add i64 %478, 1 + br label %header__32 + +exit__32: ; preds = %header__32 + call void @__quantum__rt__array_update_alias_count(%Array* %473, i32 -1) + br label %exiting__31 + +header__33: ; preds = %exiting__33, %exit__31 + %495 = phi i64 [ 0, %exit__31 ], [ %503, %exiting__33 ] + %496 = icmp sle i64 %495, %477 + br i1 %496, label %body__33, label %exit__33 + +body__33: ; preds = %header__33 + %497 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %samples, i64 %495) + %498 = bitcast i8* %497 to { %Array*, i64 }** + %499 = load { %Array*, i64 }*, { %Array*, i64 }** %498, align 8 + %500 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %499, i32 0, i32 0 + %501 = load %Array*, %Array** %500, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %501, i32 -1) + %502 = bitcast { %Array*, i64 }* %499 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %502, i32 -1) + br label %exiting__33 + +exiting__33: ; preds = %body__33 + %503 = add i64 %495, 1 + br label %header__33 + +exit__33: ; preds = %header__33 + call void @__quantum__rt__array_update_reference_count(%Array* %samples, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %90, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %90, i32 -1) + %504 = sub i64 %91, 1 + 
br label %header__34 + +header__34: ; preds = %exiting__34, %exit__33 + %505 = phi i64 [ 0, %exit__33 ], [ %513, %exiting__34 ] + %506 = icmp sle i64 %505, %504 + br i1 %506, label %body__34, label %exit__34 + +body__34: ; preds = %header__34 + %507 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %stateGenerators, i64 %505) + %508 = bitcast i8* %507 to { i64, %Callable* }** + %509 = load { i64, %Callable* }*, { i64, %Callable* }** %508, align 8 + %510 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %509, i32 0, i32 1 + %511 = load %Callable*, %Callable** %510, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %511, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %511, i32 -1) + %512 = bitcast { i64, %Callable* }* %509 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %512, i32 -1) + br label %exiting__34 + +exiting__34: ; preds = %body__34 + %513 = add i64 %505, 1 + br label %header__34 + +exit__34: ; preds = %header__34 + call void @__quantum__rt__array_update_reference_count(%Array* %stateGenerators, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %102, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %102, i32 -1) + %514 = sub i64 %103, 1 + br label %header__35 + +header__35: ; preds = %exiting__35, %exit__34 + %515 = phi i64 [ 0, %exit__34 ], [ %520, %exiting__35 ] + %516 = icmp sle i64 %515, %514 + br i1 %516, label %body__35, label %exit__35 + +body__35: ; preds = %header__35 + %517 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %features, i64 %515) + %518 = bitcast i8* %517 to %Array** + %519 = load %Array*, %Array** %518, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %519, i32 -1) + br label %exiting__35 + +exiting__35: ; preds = %body__35 + %520 = add i64 %515, 1 + br label %header__35 + +exit__35: ; preds = %header__35 + call void @__quantum__rt__array_update_reference_count(%Array* %features, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %111, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %111, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %actualLabels, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %118, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %inferredLabels, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %142, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %142, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %145, i32 -1) + %521 = call i64 @__quantum__rt__array_get_size_1d(%Array* %146) + %522 = sub i64 %521, 1 + br label %header__36 + +header__36: ; preds = %exiting__36, %exit__35 + %523 = phi i64 [ 0, %exit__35 ], [ %528, %exiting__36 ] + %524 = icmp sle i64 %523, %522 + br i1 %524, label %body__36, label %exit__36 + +body__36: ; preds = %header__36 + %525 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %146, i64 %523) + %526 = bitcast i8* %525 to %Array** + %527 = load %Array*, %Array** %526, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %527, i32 -1) + br label %exiting__36 + +exiting__36: ; preds = %body__36 + %528 = add i64 %523, 1 + br label %header__36 + +exit__36: ; preds = %header__36 + call void @__quantum__rt__array_update_reference_count(%Array* %146, i32 -1) + %529 = sub i64 
%147, 1 + br label %header__37 + +header__37: ; preds = %exiting__37, %exit__36 + %530 = phi i64 [ 0, %exit__36 ], [ %537, %exiting__37 ] + %531 = icmp sle i64 %530, %529 + br i1 %531, label %body__37, label %exit__37 + +body__37: ; preds = %header__37 + %532 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %minibatches, i64 %530) + %533 = bitcast i8* %532 to %Array** + %534 = load %Array*, %Array** %533, align 8 + %535 = call i64 @__quantum__rt__array_get_size_1d(%Array* %534) + %536 = sub i64 %535, 1 + br label %header__38 + +exiting__37: ; preds = %exit__38 + %537 = add i64 %530, 1 + br label %header__37 + +exit__37: ; preds = %header__37 + call void @__quantum__rt__array_update_reference_count(%Array* %minibatches, i32 -1) + %538 = sub i64 %158, 1 + br label %header__39 + +header__38: ; preds = %exiting__38, %body__37 + %539 = phi i64 [ 0, %body__37 ], [ %555, %exiting__38 ] + %540 = icmp sle i64 %539, %536 + br i1 %540, label %body__38, label %exit__38 + +body__38: ; preds = %header__38 + %541 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %534, i64 %539) + %542 = bitcast i8* %541 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %543 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %542, align 8 + %544 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %543, i32 0, i32 0 + %545 = load { %Array*, i64 }*, { %Array*, i64 }** %544, align 8 + %546 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %545, i32 0, i32 0 + %547 = load %Array*, %Array** %546, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %547, i32 -1) + %548 = bitcast { %Array*, i64 }* %545 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %548, i32 -1) + %549 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %543, i32 0, i32 1 + %550 = load { i64, %Callable* }*, { i64, %Callable* }** %549, align 8 + %551 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %550, i32 0, i32 1 + %552 = load %Callable*, %Callable** %551, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %552, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %552, i32 -1) + %553 = bitcast { i64, %Callable* }* %550 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %553, i32 -1) + %554 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %543 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %554, i32 -1) + br label %exiting__38 + +exiting__38: ; preds = %body__38 + %555 = add i64 %539, 1 + br label %header__38 + +exit__38: ; preds = %header__38 + call void @__quantum__rt__array_update_reference_count(%Array* %534, i32 -1) + br label %exiting__37 + +header__39: ; preds = %exiting__39, %exit__37 + %556 = phi i64 [ 0, %exit__37 ], [ %565, %exiting__39 ] + %557 = icmp sle i64 %556, %538 + br i1 %557, label %body__39, label %exit__39 + +body__39: ; preds = %header__39 + %558 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %157, i64 %556) + %559 = bitcast i8* %558 to { i64, %Array* }** + %560 = load { i64, %Array* }*, { i64, %Array* }** %559, align 8 + %561 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %560, i32 0, i32 1 + %562 = load %Array*, %Array** %561, align 8 + %563 = call i64 @__quantum__rt__array_get_size_1d(%Array* %562) + %564 = sub i64 %563, 1 + 
br label %header__40 + +exiting__39: ; preds = %exit__40 + %565 = add i64 %556, 1 + br label %header__39 + +exit__39: ; preds = %header__39 + call void @__quantum__rt__array_update_reference_count(%Array* %157, i32 -1) + %566 = sub i64 %193, 1 + br label %header__41 + +header__40: ; preds = %exiting__40, %body__39 + %567 = phi i64 [ 0, %body__39 ], [ %583, %exiting__40 ] + %568 = icmp sle i64 %567, %564 + br i1 %568, label %body__40, label %exit__40 + +body__40: ; preds = %header__40 + %569 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %562, i64 %567) + %570 = bitcast i8* %569 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %571 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %570, align 8 + %572 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %571, i32 0, i32 0 + %573 = load { %Array*, i64 }*, { %Array*, i64 }** %572, align 8 + %574 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %573, i32 0, i32 0 + %575 = load %Array*, %Array** %574, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %575, i32 -1) + %576 = bitcast { %Array*, i64 }* %573 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %576, i32 -1) + %577 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %571, i32 0, i32 1 + %578 = load { i64, %Callable* }*, { i64, %Callable* }** %577, align 8 + %579 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %578, i32 0, i32 1 + %580 = load %Callable*, %Callable** %579, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %580, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %580, i32 -1) + %581 = bitcast { i64, %Callable* }* %578 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %581, i32 -1) + %582 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %571 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %582, i32 -1) + br label %exiting__40 + +exiting__40: ; preds = %body__40 + %583 = add i64 %567, 1 + br label %header__40 + +exit__40: ; preds = %header__40 + call void @__quantum__rt__array_update_reference_count(%Array* %562, i32 -1) + %584 = bitcast { i64, %Array* }* %560 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %584, i32 -1) + br label %exiting__39 + +header__41: ; preds = %exiting__41, %exit__39 + %585 = phi i64 [ 0, %exit__39 ], [ %596, %exiting__41 ] + %586 = icmp sle i64 %585, %566 + br i1 %586, label %body__41, label %exit__41 + +body__41: ; preds = %header__41 + %587 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %190, i64 %585) + %588 = bitcast i8* %587 to { { i64, %Array* }*, i2, i64 }** + %589 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %588, align 8 + %590 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %589, i32 0, i32 0 + %591 = load { i64, %Array* }*, { i64, %Array* }** %590, align 8 + %592 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %591, i32 0, i32 1 + %593 = load %Array*, %Array** %592, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %593, i32 -1) + %594 = bitcast { i64, %Array* }* %591 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %594, i32 -1) + %595 = bitcast { { i64, %Array* }*, i2, i64 }* %589 to 
%Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %595, i32 -1) + br label %exiting__41 + +exiting__41: ; preds = %body__41 + %596 = add i64 %585, 1 + br label %header__41 + +exit__41: ; preds = %header__41 + call void @__quantum__rt__array_update_reference_count(%Array* %190, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %192, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %384, i32 -1) + ret { i64, { %Array*, %Array*, double }* }* %386 +} + +define internal i1 @Microsoft__Quantum__Logical__NearlyEqualD__body(double %a, double %b) { +entry: + %0 = fsub double %a, %b + %1 = call double @Microsoft__Quantum__Math__AbsD__body(double %0) + %2 = fcmp olt double %1, 0x3D719799812DEA11 + ret i1 %2 +} + +define internal i1 @Microsoft__Quantum__MachineLearning____QsRef0__AllNearlyEqualD____body(%Array* %v1, %Array* %v2) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %v1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %v2, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %v1) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %v2) + %2 = icmp eq i64 %0, %1 + br i1 %2, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %entry + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Logical__NearlyEqualD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Array* @Microsoft__Quantum__Arrays___1ff548eeaa7940ff923499697de8c6a5_Zipped__body(%Array* %v1, %Array* %v2) + %5 = call i1 @Microsoft__Quantum__Arrays___fb527ee42fef4f1c9ddef4d984062404_All__body(%Callable* %3, %Array* %4) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %7 = sub i64 %6, 1 + br label %header__1 + +condContinue__1: ; preds = %exit__1, %entry + %8 = phi i1 [ %5, %exit__1 ], [ %2, %entry ] + call void @__quantum__rt__array_update_alias_count(%Array* %v1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %v2, i32 -1) + ret i1 %8 + +header__1: ; preds = %exiting__1, %condTrue__1 + %9 = phi i64 [ 0, %condTrue__1 ], [ %15, %exiting__1 ] + %10 = icmp sle i64 %9, %7 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %9) + %12 = bitcast i8* %11 to { double, double }** + %13 = load { double, double }*, { double, double }** %12, align 8 + %14 = bitcast { double, double }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %9, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + br label %condContinue__1 +} + +define internal %Array* @Microsoft__Quantum__Arrays___6bc4a411bac74d8081320ac7e36319e3_ForEach__body(%Callable* %action, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = icmp eq i64 
%length, 0 + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %1 + +continue__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %3 = bitcast i8* %2 to double* + %4 = load double, double* %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { double }* + %7 = getelementptr inbounds { double }, { double }* %6, i32 0, i32 0 + store double %4, double* %7, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %action, %Tuple* %5, %Tuple* %8) + %9 = bitcast %Tuple* %8 to { double }* + %10 = getelementptr inbounds { double }, { double }* %9, i32 0, i32 0 + %first = load double, double* %10, align 8 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %12 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %13 = phi i64 [ 0, %continue__1 ], [ %17, %exiting__1 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %11, i64 %13) + %16 = bitcast i8* %15 to double* + store double %first, double* %16, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %13, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %11, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %18 = sub i64 %length, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idx = phi i64 [ 1, %exit__1 ], [ %34, %exiting__2 ] + %19 = icmp sle i64 %idx, %18 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1) + %21 = call %Array* @__quantum__rt__array_copy(%Array* %20, i1 false) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %23 = bitcast i8* %22 to double* + %24 = load double, double* %23, align 8 + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { double }* + %27 = getelementptr inbounds { double }, { double }* %26, i32 0, i32 0 + store double %24, double* %27, align 8 + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %action, %Tuple* %25, %Tuple* %28) + %29 = bitcast %Tuple* %28 to { double }* + %30 = getelementptr inbounds { double }, { double }* %29, i32 0, i32 0 + %31 = load double, double* %30, align 8 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %idx) + %33 = bitcast i8* %32 to double* + store double %31, double* %33, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 1) + store 
%Array* %21, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %34 = add i64 %idx, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %35 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %35, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret %Array* %35 +} + +define internal void @Lifted__PartialApplication__3__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { double }* + %4 = getelementptr inbounds { double }, { double }* %3, i32 0, i32 0 + %5 = load double, double* %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, double }* + %8 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store double %5, double* %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__RandomlyRescale____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, double }* + %1 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load double, double* %2, align 8 + %5 = call double @Microsoft__Quantum__MachineLearning____QsRef0__RandomlyRescale____body(double %3, double %4) + %6 = bitcast %Tuple* %result-tuple to { double }* + %7 = getelementptr inbounds { double }, { double }* %6, i32 0, i32 0 + store double %5, double* %7, align 8 + ret void +} + +define internal void @MemoryManagement__3__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__3__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal double @Microsoft__Quantum__MachineLearning____QsRef0__RandomlyRescale____body(double %scale, double %value) { +entry: + %0 = call i1 @Microsoft__Quantum__Random__DrawRandomBool__body(double 5.000000e-01) + br i1 %0, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %1 = phi double [ 1.000000e+00, %condTrue__1 ], [ -1.000000e+00, %condFalse__1 ] + %2 = fmul double %scale, %1 + %3 = fadd double 1.000000e+00, %2 + %4 = fmul double %value, %3 + ret double %4 +} + +define internal %Array* @Microsoft__Quantum__MachineLearning___Features__body({ %Array*, i64 }* %sample) { +entry: + %0 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %sample, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array*, i64 }* %sample to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret %Array* %1 +} + +define internal i64 @Microsoft__Quantum__MachineLearning___Label__body({ %Array*, i64 }* %sample) { +entry: + %0 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %sample, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array*, i64 }* %sample to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %sample, i32 0, i32 1 + %4 = load i64, i64* %3, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret i64 %4 +} + +define internal { { %Array*, i64 }*, { i64, %Callable* }* }* @Microsoft__Quantum__MachineLearning____QsRef0__EncodeSample____body(double %effectiveTolerance, i64 %nQubits, { %Array*, i64 }* %sample) { +entry: + %0 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %sample, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array*, i64 }* %sample to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %3 = call { i64, %Callable* }* 
@Microsoft__Quantum__MachineLearning__ApproximateInputEncoder__body(double %effectiveTolerance, %Array* %1) + %4 = bitcast { i64, %Callable* }* %3 to %Tuple* + %5 = call %Tuple* @__quantum__rt__tuple_copy(%Tuple* %4, i1 false) + %6 = bitcast %Tuple* %5 to { i64, %Callable* }* + %7 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %6, i32 0, i32 0 + store i64 %nQubits, i64* %7, align 4 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, i64 }*, { i64, %Callable* }* }* getelementptr ({ { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { %Array*, i64 }*, { i64, %Callable* }* }* + %10 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %9, i32 0, i32 1 + store { %Array*, i64 }* %sample, { %Array*, i64 }** %10, align 8 + store { i64, %Callable* }* %6, { i64, %Callable* }** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret { { %Array*, i64 }*, { i64, %Callable* }* }* %9 +} + +define internal i1 @Microsoft__Quantum__Arrays___fb527ee42fef4f1c9ddef4d984062404_All__body(%Callable* %predicate, %Array* %array) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %predicate, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %predicate, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Logical__And__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %10 = call %Array* @Microsoft__Quantum__Arrays___83850f08600e4e54b1fe3e670f742428_Mapped__body(%Callable* %predicate, %Array* %array) + %11 = call i1 @Microsoft__Quantum__Arrays___e14e05cbd7674cf99f7174e4f55f22e1_Fold__body(%Callable* %9, i1 true, %Array* %10) + call void @__quantum__rt__capture_update_alias_count(%Callable* %predicate, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %predicate, i32 -1) + %12 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %13) + %16 = bitcast i8* %15 to { double, 
double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 -1) + ret i1 %11 +} + +define internal void @Microsoft__Quantum__Logical__NearlyEqualD__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, double }* + %1 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load double, double* %2, align 8 + %5 = call i1 @Microsoft__Quantum__Logical__NearlyEqualD__body(double %3, double %4) + %6 = bitcast %Tuple* %result-tuple to { i1 }* + %7 = getelementptr inbounds { i1 }, { i1 }* %6, i32 0, i32 0 + store i1 %5, i1* %7, align 1 + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___1ff548eeaa7940ff923499697de8c6a5_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %2 = icmp slt i64 %0, %1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = icmp eq i64 %nElements, 0 + br i1 %3, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %4 + +continue__1: ; preds = %condContinue__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %6 = bitcast i8* %5 to double* + %7 = load double, double* %6, align 8 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %9 = bitcast i8* %8 to double* + %10 = load double, double* %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { double, double }* + %13 = getelementptr inbounds { double, double }, { double, double }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { double, double }, { double, double }* %12, i32 0, i32 1 + store double %7, double* %13, align 8 + store double %10, double* %14, align 8 + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %16 = sub i64 %nElements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %17 = phi i64 [ 0, 
%continue__1 ], [ %21, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %17) + %20 = bitcast i8* %19 to { double, double }** + store { double, double }* %12, { double, double }** %20, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %15, %Array** %output, align 8 + %22 = sub i64 %nElements, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %23) + %26 = bitcast i8* %25 to { double, double }** + %27 = load { double, double }*, { double, double }** %26, align 8 + %28 = bitcast { double, double }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %30 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %48, %exiting__3 ] + %31 = icmp sle i64 %idxElement, %30 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %32 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %35 = bitcast i8* %34 to double* + %36 = load double, double* %35, align 8 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %38 = bitcast i8* %37 to double* + %39 = load double, double* %38, align 8 + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { double, double }* + %42 = getelementptr inbounds { double, double }, { double, double }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { double, double }, { double, double }* %41, i32 0, i32 1 + store double %36, double* %42, align 8 + store double %39, double* %43, align 8 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxElement) + %45 = bitcast i8* %44 to { double, double }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %46 = load { double, double }*, { double, double }** %45, align 8 + %47 = bitcast { double, double }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { double, double }* %41, { double, double }** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %49 
= load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { double, double }** + %56 = load { double, double }*, { double, double }** %55, align 8 + %57 = bitcast { double, double }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret %Array* %49 +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__ApplyTwoQubitCase____body(%Array* %datum, { %Array* }* %reg) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %datum, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %reg, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %reg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 1) + %4 = bitcast i8* %3 to double* + %5 = load double, double* %4, align 8 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 0) + %7 = bitcast i8* %6 to double* + %8 = load double, double* %7, align 8 + %x = fdiv double %5, %8 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 2) + %10 = bitcast i8* %9 to double* + %11 = load double, double* %10, align 8 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 0) + %13 = bitcast i8* %12 to double* + %14 = load double, double* %13, align 8 + %y = fdiv double %11, %14 + %15 = call double @__quantum__qis__arctan__body(double %x) + %ax = fmul double 2.000000e+00, %15 + %16 = call double @__quantum__qis__arctan__body(double %y) + %ay = fmul double 2.000000e+00, %16 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 1) + %18 = bitcast i8* %17 to %Qubit** + %qubit = load %Qubit*, %Qubit** %18, align 8 + call void @__quantum__qis__r__body(i2 -1, double %ay, %Qubit* %qubit) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 0) + %20 = bitcast i8* %19 to %Qubit** + %qubit__1 = load %Qubit*, %Qubit** %20, align 8 + call void @__quantum__qis__r__body(i2 -1, double %ax, %Qubit* %qubit__1) + call void @__quantum__rt__array_update_alias_count(%Array* %datum, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +declare double @__quantum__qis__arctan__body(double) + +declare void @__quantum__qis__r__body(i2, double, %Qubit*) + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__ApplyTwoQubitCase____adj(%Array* %datum, { %Array* }* %reg) { +entry: + call void 
@__quantum__rt__array_update_alias_count(%Array* %datum, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %reg, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %reg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 1) + %4 = bitcast i8* %3 to double* + %5 = load double, double* %4, align 8 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 0) + %7 = bitcast i8* %6 to double* + %8 = load double, double* %7, align 8 + %__qsVar0__x__ = fdiv double %5, %8 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 2) + %10 = bitcast i8* %9 to double* + %11 = load double, double* %10, align 8 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 0) + %13 = bitcast i8* %12 to double* + %14 = load double, double* %13, align 8 + %__qsVar1__y__ = fdiv double %11, %14 + %15 = call double @__quantum__qis__arctan__body(double %__qsVar0__x__) + %__qsVar2__ax__ = fmul double 2.000000e+00, %15 + %16 = call double @__quantum__qis__arctan__body(double %__qsVar1__y__) + %__qsVar3__ay__ = fmul double 2.000000e+00, %16 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 0) + %18 = bitcast i8* %17 to %Qubit** + %qubit = load %Qubit*, %Qubit** %18, align 8 + call void @__quantum__qis__r__adj(i2 -1, double %__qsVar2__ax__, %Qubit* %qubit) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 1) + %20 = bitcast i8* %19 to %Qubit** + %qubit__1 = load %Qubit*, %Qubit** %20, align 8 + call void @__quantum__qis__r__adj(i2 -1, double %__qsVar3__ay__, %Qubit* %qubit__1) + call void @__quantum__rt__array_update_alias_count(%Array* %datum, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +declare void @__quantum__qis__r__adj(i2, double, %Qubit*) + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__ApplyTwoQubitCase____ctl(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %datum = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %datum, i32 1) + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %reg = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %reg, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %reg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 1) + %7 = bitcast i8* %6 to double* + %8 = load double, double* %7, align 8 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 0) + %10 = bitcast i8* %9 to double* + %11 = load double, double* %10, align 8 + %x = fdiv double %8, %11 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 2) + %13 = bitcast i8* %12 to double* + %14 = load double, double* %13, align 8 + %15 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 0) + %16 = bitcast i8* %15 to double* + %17 = load double, double* %16, align 8 + %y = fdiv double %14, %17 + %18 = call double @__quantum__qis__arctan__body(double %x) + %ax = fmul double 2.000000e+00, %18 + %19 = call double @__quantum__qis__arctan__body(double %y) + %ay = fmul double 2.000000e+00, %19 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 1) + %21 = bitcast i8* %20 to %Qubit** + %qubit = load %Qubit*, %Qubit** %21, align 8 + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %23 = bitcast %Tuple* %22 to { i2, double, %Qubit* }* + %24 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %23, i32 0, i32 0 + %25 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %23, i32 0, i32 1 + %26 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %23, i32 0, i32 2 + store i2 -1, i2* %24, align 1 + store double %ay, double* %25, align 8 + store %Qubit* %qubit, %Qubit** %26, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %23) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 0) + %28 = bitcast i8* %27 to %Qubit** + %qubit__1 = load %Qubit*, %Qubit** %28, align 8 + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %30 = bitcast %Tuple* %29 to { i2, double, %Qubit* }* + %31 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %30, i32 0, i32 0 + %32 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %30, i32 0, i32 1 + %33 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %30, i32 0, i32 2 + store i2 -1, i2* %31, align 1 + store double %ax, double* %32, align 8 + store %Qubit* %qubit__1, %Qubit** %33, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %30) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %datum, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + ret void +} + +declare void @__quantum__qis__r__ctl(%Array*, { i2, double, %Qubit* }*) + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__ApplyTwoQubitCase____ctladj(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %datum = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* 
%datum, i32 1) + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %reg = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %reg, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %reg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 1) + %7 = bitcast i8* %6 to double* + %8 = load double, double* %7, align 8 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 0) + %10 = bitcast i8* %9 to double* + %11 = load double, double* %10, align 8 + %__qsVar0__x__ = fdiv double %8, %11 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 2) + %13 = bitcast i8* %12 to double* + %14 = load double, double* %13, align 8 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 0) + %16 = bitcast i8* %15 to double* + %17 = load double, double* %16, align 8 + %__qsVar1__y__ = fdiv double %14, %17 + %18 = call double @__quantum__qis__arctan__body(double %__qsVar0__x__) + %__qsVar2__ax__ = fmul double 2.000000e+00, %18 + %19 = call double @__quantum__qis__arctan__body(double %__qsVar1__y__) + %__qsVar3__ay__ = fmul double 2.000000e+00, %19 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 0) + %21 = bitcast i8* %20 to %Qubit** + %qubit = load %Qubit*, %Qubit** %21, align 8 + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %23 = bitcast %Tuple* %22 to { i2, double, %Qubit* }* + %24 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %23, i32 0, i32 0 + %25 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %23, i32 0, i32 1 + %26 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %23, i32 0, i32 2 + store i2 -1, i2* %24, align 1 + store double %__qsVar2__ax__, double* %25, align 8 + store %Qubit* %qubit, %Qubit** %26, align 8 + call void @__quantum__qis__r__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %23) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 1) + %28 = bitcast i8* %27 to %Qubit** + %qubit__1 = load %Qubit*, %Qubit** %28, align 8 + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %30 = bitcast %Tuple* %29 to { i2, double, %Qubit* }* + %31 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %30, i32 0, i32 0 + %32 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %30, i32 0, i32 1 + %33 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %30, i32 0, i32 2 + store i2 -1, i2* %31, align 1 + store double %__qsVar3__ay__, double* %32, align 8 + store %Qubit* %qubit__1, %Qubit** %33, align 8 + call void 
@__quantum__qis__r__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %30) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %datum, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + ret void +} + +declare void @__quantum__qis__r__ctladj(%Array*, { i2, double, %Qubit* }*) + +define internal i1 @Microsoft__Quantum__MachineLearning____QsRef0__CanApplyTwoQubitCase____body(%Array* %datum) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %datum, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %datum) + %1 = icmp eq i64 %0, 4 + br i1 %1, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 0) + %3 = bitcast i8* %2 to double* + %4 = load double, double* %3, align 8 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 3) + %6 = bitcast i8* %5 to double* + %7 = load double, double* %6, align 8 + %8 = fmul double %4, %7 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 1) + %10 = bitcast i8* %9 to double* + %11 = load double, double* %10, align 8 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 2) + %13 = bitcast i8* %12 to double* + %14 = load double, double* %13, align 8 + %15 = fmul double %11, %14 + %16 = fsub double %8, %15 + %17 = call double @Microsoft__Quantum__Math__AbsD__body(double %16) + %18 = fcmp olt double %17, 0x3D719799812DEA11 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %entry + %19 = phi i1 [ %18, %condTrue__1 ], [ %1, %entry ] + br i1 %19, label %condTrue__2, label %condContinue__2 + +condTrue__2: ; preds = %condContinue__1 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %datum, i64 0) + %21 = bitcast i8* %20 to double* + %22 = load double, double* %21, align 8 + %23 = call double @Microsoft__Quantum__Math__AbsD__body(double %22) + %24 = fcmp ogt double %23, 1.000000e-04 + br label %condContinue__2 + +condContinue__2: ; preds = %condTrue__2, %condContinue__1 + %25 = phi i1 [ %24, %condTrue__2 ], [ %19, %condContinue__1 ] + call void @__quantum__rt__array_update_alias_count(%Array* %datum, i32 -1) + ret i1 %25 +} + +define internal double @Microsoft__Quantum__Math__AbsD__body(double %a) { +entry: + %0 = fcmp olt double %a, 0.000000e+00 + br i1 %0, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %1 = fneg double %a + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %2 = phi double [ %1, %condTrue__1 ], [ %a, %condFalse__1 ] + ret double %2 +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__MachineLearning__ApproximateInputEncoder__body(double %tolerance, %Array* %coefficients) { +entry: + %ang = alloca double, align 8 + %magnitude = alloca double, align 8 + %cNegative = alloca i64, align 8 + %complexCoefficients = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %nCoefficients = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %0 = 
call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double 0.000000e+00, double 0.000000e+00) + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nCoefficients) + %2 = sub i64 %nCoefficients, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %3) + %6 = bitcast i8* %5 to { double, double }** + store { double, double }* %0, { double, double }** %6, align 8 + %7 = bitcast { double, double }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %1, %Array** %complexCoefficients, align 8 + %9 = sub i64 %nCoefficients, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %16, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %10) + %13 = bitcast i8* %12 to { double, double }** + %14 = load { double, double }*, { double, double }** %13, align 8 + %15 = bitcast { double, double }* %14 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %16 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + store i64 0, i64* %cNegative, align 4 + %17 = call %Array* @Microsoft__Quantum__Arrays___0e6bc4a124064ccaaf317888c577a89b_Enumerated__body(%Array* %coefficients) + %18 = call i64 @__quantum__rt__array_get_size_1d(%Array* %17) + %19 = sub i64 %18, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %20 = phi i64 [ 0, %exit__2 ], [ %51, %exiting__3 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %20) + %23 = bitcast i8* %22 to { i64, double }** + %24 = load { i64, double }*, { i64, double }** %23, align 8 + %25 = getelementptr inbounds { i64, double }, { i64, double }* %24, i32 0, i32 0 + %idx = load i64, i64* %25, align 4 + %26 = getelementptr inbounds { i64, double }, { i64, double }* %24, i32 0, i32 1 + %coef = load double, double* %26, align 8 + store double %coef, double* %magnitude, align 8 + %27 = fcmp ogt double %tolerance, 1.000000e-09 + br i1 %27, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__3 + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %idx) + %29 = bitcast i8* %28 to double* + %30 = load double, double* %29, align 8 + %31 = fdiv double %30, %tolerance + %32 = call i64 @Microsoft__Quantum__Math__Round__body(double %31) + %33 = sitofp i64 %32 to double + %34 = fmul double %tolerance, %33 + store double %34, double* %magnitude, align 8 + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__3 + store double 0.000000e+00, double* %ang, align 8 + %35 = load double, double* %magnitude, align 8 + %36 = fcmp olt double %35, 0.000000e+00 + br i1 %36, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + %37 = load i64, i64* %cNegative, 
align 4 + %38 = add i64 %37, 1 + store i64 %38, i64* %cNegative, align 4 + %39 = fneg double %35 + store double %39, double* %magnitude, align 8 + %40 = call double @Microsoft__Quantum__Math__PI__body() + store double %40, double* %ang, align 8 + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 + %41 = load %Array*, %Array** %complexCoefficients, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 -1) + %42 = call %Array* @__quantum__rt__array_copy(%Array* %41, i1 false) + %43 = load double, double* %magnitude, align 8 + %44 = load double, double* %ang, align 8 + %45 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %43, double %44) + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %42, i64 %idx) + %47 = bitcast i8* %46 to { double, double }** + %48 = bitcast { double, double }* %45 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %48, i32 1) + %49 = load { double, double }*, { double, double }** %47, align 8 + %50 = bitcast { double, double }* %49 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %50, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %50, i32 -1) + store { double, double }* %45, { double, double }** %47, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 1) + store %Array* %42, %Array** %complexCoefficients, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %continue__2 + %51 = add i64 %20, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %52 = call i1 @Microsoft__Quantum__MachineLearning____QsRef0__CanApplyTwoQubitCase____body(%Array* %coefficients) + br i1 %52, label %then0__3, label %continue__3 + +then0__3: ; preds = %exit__3 + %53 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning____QsRef0__ApplyTwoQubitCase____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + %54 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %55 = bitcast %Tuple* %54 to { %Callable*, %Array* }* + %56 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %55, i32 0, i32 0 + %57 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %55, i32 0, i32 1 + store %Callable* %53, %Callable** %56, align 8 + store %Array* %coefficients, %Array** %57, align 8 + %58 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__10__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__9__FunctionTable, %Tuple* %54) + %59 = call { i64, %Callable* }* @Microsoft__Quantum__MachineLearning__StateGenerator__body(i64 2, %Callable* %58) + %60 = load %Array*, %Array** %complexCoefficients, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + %61 = call i64 @__quantum__rt__array_get_size_1d(%Array* %60) + %62 = sub i64 %61, 1 + br label %header__4 + +continue__3: ; preds = %exit__3 + %nQubits = call i64 @Microsoft__Quantum__MachineLearning__FeatureRegisterSize__body(%Array* %coefficients) + %63 = load i64, i64* %cNegative, align 4 + %64 = icmp sgt i64 %63, 0 + br i1 %64, label %condTrue__1, label 
%condContinue__1 + +condTrue__1: ; preds = %continue__3 + %65 = sitofp i64 %63 to double + %66 = sitofp i64 %nCoefficients to double + %67 = call double @Microsoft__Quantum__Math__Lg__body(double %66) + %68 = fadd double %67, 1.000000e+00 + %69 = fcmp olt double %65, %68 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %continue__3 + %70 = phi i1 [ %69, %condTrue__1 ], [ %64, %continue__3 ] + br i1 %70, label %then0__4, label %continue__4 + +then0__4: ; preds = %condContinue__1 + %71 = load %Array*, %Array** %complexCoefficients, align 8 + %negLocs = call %Array* @Microsoft__Quantum__MachineLearning____QsRef0__NegativeLocations____body(i64 %63, %Array* %71) + call void @__quantum__rt__array_update_alias_count(%Array* %negLocs, i32 1) + %72 = call %Array* @Microsoft__Quantum__MachineLearning____QsRef0__Unnegate____body(%Array* %negLocs, %Array* %71) + %73 = call %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %72, i64 %nQubits) + %74 = call i64 @__quantum__rt__array_get_size_1d(%Array* %72) + %75 = sub i64 %74, 1 + br label %header__7 + +continue__4: ; preds = %condContinue__1 + %76 = load %Array*, %Array** %complexCoefficients, align 8 + %77 = call %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %76, i64 %nQubits) + %78 = call { i64, %Callable* }* @Microsoft__Quantum__MachineLearning__StateGenerator__body(i64 %nQubits, %Callable* %77) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + %79 = call i64 @__quantum__rt__array_get_size_1d(%Array* %76) + %80 = sub i64 %79, 1 + br label %header__13 + +header__4: ; preds = %exiting__4, %then0__3 + %81 = phi i64 [ 0, %then0__3 ], [ %87, %exiting__4 ] + %82 = icmp sle i64 %81, %62 + br i1 %82, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %83 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 %81) + %84 = bitcast i8* %83 to { double, double }** + %85 = load { double, double }*, { double, double }** %84, align 8 + %86 = bitcast { double, double }* %85 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %87 = add i64 %81, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %60, i32 -1) + %88 = bitcast { double, double }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %88, i32 -1) + %89 = sub i64 %18, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %90 = phi i64 [ 0, %exit__4 ], [ %96, %exiting__5 ] + %91 = icmp sle i64 %90, %89 + br i1 %91, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %92 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %90) + %93 = bitcast i8* %92 to { i64, double }** + %94 = load { i64, double }*, { i64, double }** %93, align 8 + %95 = bitcast { i64, double }* %94 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %95, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %96 = add i64 %90, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %58, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %58, i32 -1) + %97 = sub i64 
%61, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %98 = phi i64 [ 0, %exit__5 ], [ %104, %exiting__6 ] + %99 = icmp sle i64 %98, %97 + br i1 %99, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %100 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 %98) + %101 = bitcast i8* %100 to { double, double }** + %102 = load { double, double }*, { double, double }** %101, align 8 + %103 = bitcast { double, double }* %102 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %103, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %104 = add i64 %98, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %60, i32 -1) + ret { i64, %Callable* }* %59 + +header__7: ; preds = %exiting__7, %then0__4 + %105 = phi i64 [ 0, %then0__4 ], [ %111, %exiting__7 ] + %106 = icmp sle i64 %105, %75 + br i1 %106, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %107 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %72, i64 %105) + %108 = bitcast i8* %107 to { double, double }** + %109 = load { double, double }*, { double, double }** %108, align 8 + %110 = bitcast { double, double }* %109 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %110, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %111 = add i64 %105, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %72, i32 -1) + %112 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning____QsRef0__ReflectAboutNegativeCoefficients____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %negLocs, i32 1) + %113 = call i64 @__quantum__rt__array_get_size_1d(%Array* %71) + %114 = sub i64 %113, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %115 = phi i64 [ 0, %exit__7 ], [ %121, %exiting__8 ] + %116 = icmp sle i64 %115, %114 + br i1 %116, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %117 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %71, i64 %115) + %118 = bitcast i8* %117 to { double, double }** + %119 = load { double, double }*, { double, double }** %118, align 8 + %120 = bitcast { double, double }* %119 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %120, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %121 = add i64 %115, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %71, i32 1) + %122 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array*, %Array* }* getelementptr ({ %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* null, i32 1) to i64)) + %123 = bitcast %Tuple* %122 to { %Callable*, %Array*, %Array* }* + %124 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %123, i32 0, i32 0 + %125 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %123, i32 0, i32 1 + %126 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %123, i32 0, i32 2 + store %Callable* %112, %Callable** %124, align 8 + store %Array* %negLocs, %Array** %125, align 8 + store %Array* %71, %Array** %126, align 8 + 
%127 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__11__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__10__FunctionTable, %Tuple* %122) + %128 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 2) + %129 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %128, i64 0) + %130 = bitcast i8* %129 to %Callable** + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %128, i64 1) + %132 = bitcast i8* %131 to %Callable** + store %Callable* %73, %Callable** %130, align 8 + store %Callable* %127, %Callable** %132, align 8 + %133 = call %Callable* @Microsoft__Quantum__Canon___1809700b885a46aeb0473713f7c55f2f_BoundCA__body(%Array* %128) + %134 = call { i64, %Callable* }* @Microsoft__Quantum__MachineLearning__StateGenerator__body(i64 %nQubits, %Callable* %133) + call void @__quantum__rt__array_update_alias_count(%Array* %negLocs, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + %135 = sub i64 %113, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %136 = phi i64 [ 0, %exit__8 ], [ %142, %exiting__9 ] + %137 = icmp sle i64 %136, %135 + br i1 %137, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %138 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %71, i64 %136) + %139 = bitcast i8* %138 to { double, double }** + %140 = load { double, double }*, { double, double }** %139, align 8 + %141 = bitcast { double, double }* %140 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %141, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %142 = add i64 %136, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %71, i32 -1) + %143 = bitcast { double, double }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %143, i32 -1) + %144 = sub i64 %18, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %145 = phi i64 [ 0, %exit__9 ], [ %151, %exiting__10 ] + %146 = icmp sle i64 %145, %144 + br i1 %146, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %147 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %145) + %148 = bitcast i8* %147 to { i64, double }** + %149 = load { i64, double }*, { i64, double }** %148, align 8 + %150 = bitcast { i64, double }* %149 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %150, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %151 = add i64 %145, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %negLocs, i32 -1) + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %152 = phi i64 [ 0, %exit__10 ], [ %157, %exiting__11 ] + %153 = icmp sle i64 %152, 1 + br i1 %153, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %154 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %128, i64 %152) + %155 = bitcast i8* %154 to %Callable** + %156 = load %Callable*, %Callable** %155, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %156, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %156, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %157 = add i64 %152, 1 + br label %header__11 + 
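+; Annotation: from exit__11 this branch tears down its temporaries: the two-element callable array %128, the bound callable %133, and (via the header__12 loop) the per-coefficient tuples of %71, before returning the StateGenerator %134.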
+exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_reference_count(%Array* %128, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %133, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %133, i32 -1) + %158 = sub i64 %113, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %159 = phi i64 [ 0, %exit__11 ], [ %165, %exiting__12 ] + %160 = icmp sle i64 %159, %158 + br i1 %160, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %161 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %71, i64 %159) + %162 = bitcast i8* %161 to { double, double }** + %163 = load { double, double }*, { double, double }** %162, align 8 + %164 = bitcast { double, double }* %163 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %164, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %165 = add i64 %159, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_reference_count(%Array* %71, i32 -1) + ret { i64, %Callable* }* %134 + +header__13: ; preds = %exiting__13, %continue__4 + %166 = phi i64 [ 0, %continue__4 ], [ %172, %exiting__13 ] + %167 = icmp sle i64 %166, %80 + br i1 %167, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %168 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %76, i64 %166) + %169 = bitcast i8* %168 to { double, double }** + %170 = load { double, double }*, { double, double }** %169, align 8 + %171 = bitcast { double, double }* %170 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %171, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %172 = add i64 %166, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_alias_count(%Array* %76, i32 -1) + %173 = bitcast { double, double }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %173, i32 -1) + %174 = sub i64 %18, 1 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %175 = phi i64 [ 0, %exit__13 ], [ %181, %exiting__14 ] + %176 = icmp sle i64 %175, %174 + br i1 %176, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %177 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %175) + %178 = bitcast i8* %177 to { i64, double }** + %179 = load { i64, double }*, { i64, double }** %178, align 8 + %180 = bitcast { i64, double }* %179 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %180, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %181 = add i64 %175, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %77, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %77, i32 -1) + %182 = sub i64 %79, 1 + br label %header__15 + +header__15: ; preds = %exiting__15, %exit__14 + %183 = phi i64 [ 0, %exit__14 ], [ %189, %exiting__15 ] + %184 = icmp sle i64 %183, %182 + br i1 %184, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %185 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %76, i64 %183) + %186 = bitcast i8* %185 to { double, double }** + %187 = load { double, double }*, { double, double }** %186, align 8 + %188 = bitcast { double, double }* %187 to %Tuple* + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %188, i32 -1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %189 = add i64 %183, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_reference_count(%Array* %76, i32 -1) + ret { i64, %Callable* }* %78 +} + +define internal double @Microsoft__Quantum__MachineLearning____QsRef0__EstimateDerivativeWithParameterShift____body({ i64, %Callable* }* %inputEncoder, { %Array*, %Array*, double }* %model, { %Array*, %Array* }* %parameters, i64 %nQubits, i64 %nMeasurements) { +entry: + %0 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %inputEncoder, i32 0, i32 1 + %1 = load %Callable*, %Callable** %0, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 1) + %2 = bitcast { i64, %Callable* }* %inputEncoder to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { { i64, %Array* }*, i2, i64 }** + %11 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %10, align 8 + %12 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %11, i32 0, i32 0 + %13 = load { i64, %Array* }*, { i64, %Array* }** %12, align 8 + %14 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %13, i32 0, i32 1 + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %16 = bitcast { i64, %Array* }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + %17 = bitcast { { i64, %Array* }*, i2, i64 }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %19 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 1 + %20 = load %Array*, %Array** %19, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 1) + %21 = bitcast { %Array*, %Array*, double }* %model to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 1) + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %parameters, i32 0, i32 0 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %24 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %parameters, i32 0, i32 1 + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 1) + %26 = bitcast { %Array*, %Array* }* %parameters to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 1) + %27 = call %Callable* 
@__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning____QsRef0___1b18656b59d5446c8d4e6d6de10b907b_EstimateDerivativeWithParameterShift____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %1, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { i64, %Callable* }* }* getelementptr ({ %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { %Callable*, { i64, %Callable* }* }* + %30 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %29, i32 0, i32 1 + store %Callable* %27, %Callable** %30, align 8 + store { i64, %Callable* }* %inputEncoder, { i64, %Callable* }** %31, align 8 + %32 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__4__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__4__FunctionTable, %Tuple* %28) + %33 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %34 = call %Tuple* @__quantum__rt__tuple_copy(%Tuple* %21, i1 false) + %35 = bitcast %Tuple* %34 to { %Array*, %Array*, double }* + %36 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %35, i32 0, i32 1 + %37 = call %Array* @Microsoft__Quantum__Canon___db88ab328d524c76b8a01044e6a98e3d_Fst__body(%Array* %23, %Array* %25) + store %Array* %37, %Array** %36, align 8 + %38 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %35, i32 0, i32 0 + %39 = load %Array*, %Array** %38, align 8 + %40 = call i64 @__quantum__rt__array_get_size_1d(%Array* %39) + %41 = sub i64 %40, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %42 = phi i64 [ 0, %exit__1 ], [ %53, %exiting__2 ] + %43 = icmp sle i64 %42, %41 + br i1 %43, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %39, i64 %42) + %45 = bitcast i8* %44 to { { i64, %Array* }*, i2, i64 }** + %46 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %45, align 8 + %47 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %46, i32 0, i32 0 + %48 = load { i64, %Array* }*, { i64, %Array* }** %47, align 8 + %49 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %48, i32 0, i32 1 + %50 = load %Array*, %Array** %49, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %50, i32 1) + %51 = bitcast { i64, %Array* }* %48 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %51, i32 1) + %52 = bitcast { { i64, %Array* }*, i2, i64 }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %52, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %53 = add i64 %42, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* 
%39, i32 1) + %54 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array*, %Array*, double }* }* getelementptr ({ %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* null, i32 1) to i64)) + %55 = bitcast %Tuple* %54 to { %Callable*, { %Array*, %Array*, double }* }* + %56 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %55, i32 0, i32 0 + %57 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %55, i32 0, i32 1 + store %Callable* %33, %Callable** %56, align 8 + store { %Array*, %Array*, double }* %35, { %Array*, %Array*, double }** %57, align 8 + %58 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__5__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__5__FunctionTable, %Tuple* %54) + %59 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %60 = call %Tuple* @__quantum__rt__tuple_copy(%Tuple* %21, i1 false) + %61 = bitcast %Tuple* %60 to { %Array*, %Array*, double }* + %62 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %61, i32 0, i32 1 + %63 = call %Array* @Microsoft__Quantum__Canon___002990bbfeef4011a0d3a74007551555_Snd__body(%Array* %23, %Array* %25) + store %Array* %63, %Array** %62, align 8 + %64 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %61, i32 0, i32 0 + %65 = load %Array*, %Array** %64, align 8 + %66 = call i64 @__quantum__rt__array_get_size_1d(%Array* %65) + %67 = sub i64 %66, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %68 = phi i64 [ 0, %exit__2 ], [ %79, %exiting__3 ] + %69 = icmp sle i64 %68, %67 + br i1 %69, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %65, i64 %68) + %71 = bitcast i8* %70 to { { i64, %Array* }*, i2, i64 }** + %72 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %71, align 8 + %73 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %72, i32 0, i32 0 + %74 = load { i64, %Array* }*, { i64, %Array* }** %73, align 8 + %75 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %74, i32 0, i32 1 + %76 = load %Array*, %Array** %75, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %76, i32 1) + %77 = bitcast { i64, %Array* }* %74 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %77, i32 1) + %78 = bitcast { { i64, %Array* }*, i2, i64 }* %72 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %78, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %79 = add i64 %68, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 1) + %80 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array*, %Array*, double }* }* getelementptr ({ %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* null, i32 1) to i64)) + %81 = bitcast %Tuple* %80 to { %Callable*, { %Array*, %Array*, double }* }* + %82 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* 
}, { %Callable*, { %Array*, %Array*, double }* }* %81, i32 0, i32 0 + %83 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %81, i32 0, i32 1 + store %Callable* %59, %Callable** %82, align 8 + store { %Array*, %Array*, double }* %61, { %Array*, %Array*, double }** %83, align 8 + %84 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__6__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__5__FunctionTable, %Tuple* %80) + %85 = call double @Microsoft__Quantum__Characterization__EstimateRealOverlapBetweenStates__body(%Callable* %32, %Callable* %58, %Callable* %84, i64 %nQubits, i64 %nMeasurements) + call void @__quantum__rt__capture_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + %86 = sub i64 %5, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %87 = phi i64 [ 0, %exit__3 ], [ %98, %exiting__4 ] + %88 = icmp sle i64 %87, %86 + br i1 %88, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %87) + %90 = bitcast i8* %89 to { { i64, %Array* }*, i2, i64 }** + %91 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %90, align 8 + %92 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %91, i32 0, i32 0 + %93 = load { i64, %Array* }*, { i64, %Array* }** %92, align 8 + %94 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %93, i32 0, i32 1 + %95 = load %Array*, %Array** %94, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %95, i32 -1) + %96 = bitcast { i64, %Array* }* %93 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %96, i32 -1) + %97 = bitcast { { i64, %Array* }*, i2, i64 }* %91 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %97, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %98 = add i64 %87, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %32, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %58, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %58, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %84, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %84, i32 -1) + ret double %85 +} + +define internal double @Microsoft__Quantum__Characterization__EstimateRealOverlapBetweenStates__body(%Callable* %commonPreparation, %Callable* %preparation1, %Callable* %preparation2, i64 %nQubits, i64 %nMeasurements) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %commonPreparation, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %commonPreparation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation1, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation2, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Characterization____QsRef1__ApplyHadamardTestOnSingleRegister____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %commonPreparation, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %commonPreparation, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %preparation1, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %preparation1, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %preparation2, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %preparation2, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, i1, %Callable*, %Callable*, %Callable* }* getelementptr ({ %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, i1, %Callable*, %Callable*, %Callable* }* + %3 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 1 + %5 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 2 + %6 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 3 + %7 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %2, i32 0, i32 4 + store %Callable* %0, %Callable** %3, align 8 + store i1 false, i1* %4, align 1 + store %Callable* %commonPreparation, %Callable** %5, align 8 + store %Callable* %preparation1, %Callable** %6, align 8 + store %Callable* %preparation2, %Callable** %7, align 8 + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__16__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__14__FunctionTable, %Tuple* %1) + %9 = add i64 %nQubits, 1 + %10 = call %Callable* @Microsoft__Quantum__Characterization____QsRef1__HeadMeasurement____body(i64 %9) + %11 = add i64 %nQubits, 1 + %12 = call double @Microsoft__Quantum__Characterization__EstimateFrequencyA__body(%Callable* %8, %Callable* %10, i64 %11, i64 %nMeasurements) + %13 = fmul double 2.000000e+00, %12 + %14 = fsub double %13, 1.000000e+00 + call void @__quantum__rt__capture_update_alias_count(%Callable* %commonPreparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %commonPreparation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation1, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %preparation1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + ret double %14 +} + +define internal void @Lifted__PartialApplication__4__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { i64, %Callable* }*, %Array* }* + %8 = getelementptr inbounds { { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* %7, i32 0, i32 1 + store { i64, %Callable* }* %2, { i64, %Callable* }** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %2 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { i64, %Callable* }*, %Array* }* + %8 = getelementptr inbounds { { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* %7, i32 0, i32 1 + store { i64, %Callable* }* %2, { i64, %Callable* }** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* 
@__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %6 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 1 + %7 = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { i64, %Callable* }*, %Array* }* + %10 = getelementptr inbounds { { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* %9, i32 0, i32 1 + store { i64, %Callable* }* %7, { i64, %Callable* }** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { { i64, %Callable* }*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { { i64, %Callable* }*, %Array* }* %9, { { i64, %Callable* }*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void 
@Lifted__PartialApplication__4__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %6 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 1 + %7 = load { i64, %Callable* }*, { i64, %Callable* }** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }*, %Array* }* getelementptr ({ { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { i64, %Callable* }*, %Array* }* + %10 = getelementptr inbounds { { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* %9, i32 0, i32 1 + store { i64, %Callable* }* %7, { i64, %Callable* }** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { i64, %Callable* }*, %Array* }* }* getelementptr ({ %Array*, { { i64, %Callable* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { { i64, %Callable* }*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { { i64, %Callable* }*, %Array* }* %9, { { i64, %Callable* }*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0___1b18656b59d5446c8d4e6d6de10b907b_EstimateDerivativeWithParameterShift____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* %0, i32 0, i32 1 + %3 = load { 
i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__MachineLearning____QsRef0___1b18656b59d5446c8d4e6d6de10b907b_EstimateDerivativeWithParameterShift____body({ i64, %Callable* }* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0___1b18656b59d5446c8d4e6d6de10b907b_EstimateDerivativeWithParameterShift____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { i64, %Callable* }*, %Array* }* + %1 = getelementptr inbounds { { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { i64, %Callable* }*, %Array* }, { { i64, %Callable* }*, %Array* }* %0, i32 0, i32 1 + %3 = load { i64, %Callable* }*, { i64, %Callable* }** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__MachineLearning____QsRef0___1b18656b59d5446c8d4e6d6de10b907b_EstimateDerivativeWithParameterShift____adj({ i64, %Callable* }* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0___1b18656b59d5446c8d4e6d6de10b907b_EstimateDerivativeWithParameterShift____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, %Array* }*, { { i64, %Callable* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__MachineLearning____QsRef0___1b18656b59d5446c8d4e6d6de10b907b_EstimateDerivativeWithParameterShift____ctl(%Array* %3, { { i64, %Callable* }*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0___1b18656b59d5446c8d4e6d6de10b907b_EstimateDerivativeWithParameterShift____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { i64, %Callable* }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { i64, %Callable* }*, %Array* }* }, { %Array*, { { i64, %Callable* }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { i64, %Callable* }*, %Array* }*, { { i64, %Callable* }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__MachineLearning____QsRef0___1b18656b59d5446c8d4e6d6de10b907b_EstimateDerivativeWithParameterShift____ctladj(%Array* %3, { { i64, %Callable* }*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__4__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %4, i32 0, i32 1 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + %7 = bitcast { i64, %Callable* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__4__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { i64, %Callable* }* }, { %Callable*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %3, align 8 + %5 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %4, i32 0, i32 1 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + %7 = bitcast { i64, %Callable* }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__5__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { %Array*, %Array*, double }* }* + %1 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 1 + %2 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, %Array*, double }*, %Array* }* getelementptr ({ { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { %Array*, %Array*, double }*, %Array* }* + %8 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %7, i32 0, i32 1 + store { %Array*, %Array*, double }* %2, { %Array*, %Array*, double }** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { 
%Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { %Array*, %Array*, double }* }* + %1 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 1 + %2 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, %Array*, double }*, %Array* }* getelementptr ({ { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { %Array*, %Array*, double }*, %Array* }* + %8 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %7, i32 0, i32 1 + store { %Array*, %Array*, double }* %2, { %Array*, %Array*, double }** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { %Array*, %Array*, double }* }* + %6 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %5, i32 0, i32 1 + %7 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, %Array*, double }*, %Array* }* getelementptr ({ { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { %Array*, %Array*, double }*, %Array* }* + %10 = getelementptr inbounds { { %Array*, %Array*, double }*, 
%Array* }, { { %Array*, %Array*, double }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %9, i32 0, i32 1 + store { %Array*, %Array*, double }* %7, { %Array*, %Array*, double }** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { %Array*, %Array*, double }*, %Array* }* }* getelementptr ({ %Array*, { { %Array*, %Array*, double }*, %Array* }* }, { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { { %Array*, %Array*, double }*, %Array* }* }, { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { { %Array*, %Array*, double }*, %Array* }* }, { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { { %Array*, %Array*, double }*, %Array* }* %9, { { %Array*, %Array*, double }*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__5__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { %Array*, %Array*, double }* }* + %6 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %5, i32 0, i32 1 + %7 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, %Array*, double }*, %Array* }* getelementptr ({ { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { %Array*, %Array*, double }*, %Array* }* + %10 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %9, i32 0, i32 1 + store { %Array*, %Array*, double }* %7, { %Array*, %Array*, double }** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call 
%Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { %Array*, %Array*, double }*, %Array* }* }* getelementptr ({ %Array*, { { %Array*, %Array*, double }*, %Array* }* }, { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { { %Array*, %Array*, double }*, %Array* }* }, { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { { %Array*, %Array*, double }*, %Array* }* }, { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { { %Array*, %Array*, double }*, %Array* }* %9, { { %Array*, %Array*, double }*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { %Array*, %Array*, double }*, %Array* }* + %1 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %0, i32 0, i32 1 + %3 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__body({ %Array*, %Array*, double }* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { %Array*, %Array*, double }*, %Array* }* + %1 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %0, i32 0, i32 1 + %3 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__adj({ %Array*, %Array*, double }* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast 
%Tuple* %arg-tuple to { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { %Array*, %Array*, double }*, %Array* }* }, { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { %Array*, %Array*, double }*, %Array* }* }, { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { %Array*, %Array*, double }*, %Array* }*, { { %Array*, %Array*, double }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__ctl(%Array* %3, { { %Array*, %Array*, double }*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { { %Array*, %Array*, double }*, %Array* }* }, { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { { %Array*, %Array*, double }*, %Array* }* }, { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { { %Array*, %Array*, double }*, %Array* }*, { { %Array*, %Array*, double }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__ctladj(%Array* %3, { { %Array*, %Array*, double }*, %Array* }* %4) + ret void +} + +define internal %Array* @Microsoft__Quantum__Canon___db88ab328d524c76b8a01044e6a98e3d_Fst__body(%Array* %0, %Array* %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %pair = bitcast %Tuple* %2 to { %Array*, %Array* }* + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %pair, i32 0, i32 0 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %pair, i32 0, i32 1 + store %Array* %0, %Array** %3, align 8 + store %Array* %1, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret %Array* %0 +} + +define internal void @MemoryManagement__5__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { %Array*, %Array*, double }* }* + %1 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 1 + %4 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %3, align 8 + %5 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %4, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %9 = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %9) + %12 = bitcast i8* %11 to { { i64, %Array* }*, i2, i64 }** + %13 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %12, align 8 + %14 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %13, i32 0, i32 0 + %15 = load { i64, %Array* }*, { i64, %Array* }** %14, align 8 + %16 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %15, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 %count-change) + %18 = bitcast { i64, %Array* }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 %count-change) + %19 = bitcast { { i64, %Array* }*, i2, i64 }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %9, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 %count-change) + %21 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %4, i32 0, i32 1 + %22 = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %22, i32 %count-change) + %23 = bitcast { %Array*, %Array*, double }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__5__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { %Array*, %Array*, double }* }* + %1 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 1 + %4 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %3, align 8 + %5 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %4, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + br label %header__1 + 
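+; Annotation: the header__1 loop below walks the gate list of the captured model tuple, mirroring the traversal in MemoryManagement__5__RefCount above but updating alias counts instead of reference counts.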
+header__1: ; preds = %exiting__1, %entry + %9 = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %9) + %12 = bitcast i8* %11 to { { i64, %Array* }*, i2, i64 }** + %13 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %12, align 8 + %14 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %13, i32 0, i32 0 + %15 = load { i64, %Array* }*, { i64, %Array* }** %14, align 8 + %16 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %15, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 %count-change) + %18 = bitcast { i64, %Array* }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 %count-change) + %19 = bitcast { { i64, %Array* }*, i2, i64 }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %19, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %9, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 %count-change) + %21 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %4, i32 0, i32 1 + %22 = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %22, i32 %count-change) + %23 = bitcast { %Array*, %Array*, double }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %23, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__6__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { %Array*, %Array*, double }* }* + %1 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 1 + %2 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, %Array*, double }*, %Array* }* getelementptr ({ { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { %Array*, %Array*, double }*, %Array* }* + %8 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %7, i32 0, i32 1 + store { %Array*, %Array*, double }* %2, { %Array*, %Array*, double }** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void 
@Lifted__PartialApplication__6__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, { %Array*, %Array*, double }* }* + %1 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 1 + %2 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, %Array*, double }*, %Array* }* getelementptr ({ { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { { %Array*, %Array*, double }*, %Array* }* + %8 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %7, i32 0, i32 1 + store { %Array*, %Array*, double }* %2, { %Array*, %Array*, double }** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { %Array*, %Array*, double }* }* + %6 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %5, i32 0, i32 1 + %7 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, %Array*, double }*, %Array* }* getelementptr ({ { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { %Array*, %Array*, double }*, %Array* }* + %10 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %9, i32 0, i32 1 + store { %Array*, %Array*, double }* %7, { %Array*, %Array*, double }** %10, align 8 + store %Array* %4, %Array** 
%11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { %Array*, %Array*, double }*, %Array* }* }* getelementptr ({ %Array*, { { %Array*, %Array*, double }*, %Array* }* }, { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { { %Array*, %Array*, double }*, %Array* }* }, { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { { %Array*, %Array*, double }*, %Array* }* }, { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { { %Array*, %Array*, double }*, %Array* }* %9, { { %Array*, %Array*, double }*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__6__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, { %Array*, %Array*, double }* }* + %6 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %5, i32 0, i32 1 + %7 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, %Array*, double }*, %Array* }* getelementptr ({ { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { { %Array*, %Array*, double }*, %Array* }* + %10 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %9, i32 0, i32 1 + store { %Array*, %Array*, double }* %7, { %Array*, %Array*, double }** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { { %Array*, %Array*, double }*, %Array* }* }* getelementptr ({ %Array*, { { %Array*, %Array*, double }*, %Array* }* }, { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { { %Array*, %Array*, 
double }*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { { %Array*, %Array*, double }*, %Array* }* }, { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { { %Array*, %Array*, double }*, %Array* }* }, { %Array*, { { %Array*, %Array*, double }*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { { %Array*, %Array*, double }*, %Array* }* %9, { { %Array*, %Array*, double }*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }* }, { %Callable*, { %Array*, %Array*, double }* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Canon___002990bbfeef4011a0d3a74007551555_Snd__body(%Array* %0, %Array* %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %pair = bitcast %Tuple* %2 to { %Array*, %Array* }* + %3 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %pair, i32 0, i32 0 + %4 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %pair, i32 0, i32 1 + store %Array* %0, %Array** %3, align 8 + store %Array* %1, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret %Array* %1 +} + +define internal void @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__body({ %Array*, %Array*, double }* %model, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds 
= %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { { i64, %Array* }*, i2, i64 }** + %8 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %7, align 8 + %9 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %8, i32 0, i32 0 + %10 = load { i64, %Array* }*, { i64, %Array* }** %9, align 8 + %11 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %10, i32 0, i32 1 + %12 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = bitcast { i64, %Array* }* %10 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %13, i32 1) + %14 = bitcast { { i64, %Array* }*, i2, i64 }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array*, %Array*, double }* %model to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %19 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %52, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %20) + %23 = bitcast i8* %22 to { { i64, %Array* }*, i2, i64 }** + %gate = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %23, align 8 + %24 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %gate, i32 0, i32 0 + %25 = load { i64, %Array* }*, { i64, %Array* }** %24, align 8 + %26 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %25, i32 0, i32 1 + %27 = load %Array*, %Array** %26, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 1) + %28 = bitcast { i64, %Array* }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + %29 = bitcast { { i64, %Array* }*, i2, i64 }* %gate to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 1) + %30 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %gate, i32 0, i32 2 + %31 = load i64, i64* %30, align 4 + %32 = load %Array*, %Array** %16, align 8 + %33 = call i64 @__quantum__rt__array_get_size_1d(%Array* %32) + %34 = icmp slt i64 %31, %33 + br i1 %34, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__2 + %35 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %gate, i32 0, i32 1 + %pauli = load i2, i2* %35, align 1 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 %31) + %37 = bitcast i8* %36 to double* + %theta = load double, double* %37, align 8 + %38 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %25, i32 0, i32 0 + %39 = load i64, i64* %38, align 4 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %39) + %41 = bitcast i8* 
%40 to %Qubit** + %qubit = load %Qubit*, %Qubit** %41, align 8 + %42 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %input = bitcast %Tuple* %42 to { i2, double, %Qubit* }* + %43 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %input, i32 0, i32 0 + %44 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %input, i32 0, i32 1 + %45 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %input, i32 0, i32 2 + store i2 %pauli, i2* %43, align 1 + store double %theta, double* %44, align 8 + store %Qubit* %qubit, %Qubit** %45, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 1) + %46 = call i1 @Microsoft__Quantum__Arrays___cddb1db8090d4b2580514eb678e65fbd_IsEmpty__body(%Array* %27) + br i1 %46, label %then0__2, label %else__1 + +then0__2: ; preds = %then0__1 + call void @__quantum__qis__r__body(i2 %pauli, double %theta, %Qubit* %qubit) + br label %continue__2 + +else__1: ; preds = %then0__1 + %__controlQubits__ = call %Array* @Microsoft__Quantum__Arrays___93b43ff3c247411b9de017f51b2344c9_Subarray__body(%Array* %27, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { i2, double, %Qubit* }* + %49 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %48, i32 0, i32 1 + %51 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %48, i32 0, i32 2 + store i2 %pauli, i2* %49, align 1 + store double %theta, double* %50, align 8 + store %Qubit* %qubit, %Qubit** %51, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %48) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %then0__2 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + br label %continue__1 + +continue__1: ; preds = %continue__2, %body__2 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %continue__1 + %52 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %53 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %54 = phi i64 [ 0, %exit__2 ], [ %65, %exiting__3 ] + %55 = icmp sle i64 %54, %53 + br i1 %55, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %54) + %57 = bitcast i8* %56 to { { i64, %Array* }*, i2, i64 }** + %58 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %57, align 8 + %59 = getelementptr inbounds { { i64, %Array* }*, 
i2, i64 }, { { i64, %Array* }*, i2, i64 }* %58, i32 0, i32 0 + %60 = load { i64, %Array* }*, { i64, %Array* }** %59, align 8 + %61 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %60, i32 0, i32 1 + %62 = load %Array*, %Array** %61, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %62, i32 -1) + %63 = bitcast { i64, %Array* }* %60 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %63, i32 -1) + %64 = bitcast { { i64, %Array* }*, i2, i64 }* %58 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %64, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %65 = add i64 %54, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + %66 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__adj({ %Array*, %Array*, double }* %model, %Array* %qubits) { +entry: + %0 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { { i64, %Array* }*, i2, i64 }** + %8 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %7, align 8 + %9 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %8, i32 0, i32 0 + %10 = load { i64, %Array* }*, { i64, %Array* }** %9, align 8 + %11 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %10, i32 0, i32 1 + %12 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = bitcast { i64, %Array* }* %10 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %13, i32 1) + %14 = bitcast { { i64, %Array* }*, i2, i64 }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array*, %Array*, double }* %model to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %19 = sub i64 %2, 1 + %20 = insertvalue %Range zeroinitializer, i64 %19, 0 + %21 = insertvalue %Range %20, i64 -1, 1 + %22 = insertvalue %Range %21, i64 0, 2 + %23 = call %Array* @__quantum__rt__array_slice_1d(%Array* %1, %Range %22, i1 true) + %24 = call i64 @__quantum__rt__array_get_size_1d(%Array* %23) + %25 = sub i64 %24, 1 + br label %header__2 
+ +header__2: ; preds = %exiting__2, %exit__1 + %26 = phi i64 [ 0, %exit__1 ], [ %58, %exiting__2 ] + %27 = icmp sle i64 %26, %25 + br i1 %27, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %26) + %29 = bitcast i8* %28 to { { i64, %Array* }*, i2, i64 }** + %__qsVar0__gate__ = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %29, align 8 + %30 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %__qsVar0__gate__, i32 0, i32 0 + %31 = load { i64, %Array* }*, { i64, %Array* }** %30, align 8 + %32 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %31, i32 0, i32 1 + %33 = load %Array*, %Array** %32, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + %34 = bitcast { i64, %Array* }* %31 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %34, i32 1) + %35 = bitcast { { i64, %Array* }*, i2, i64 }* %__qsVar0__gate__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %35, i32 1) + %36 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %__qsVar0__gate__, i32 0, i32 2 + %37 = load i64, i64* %36, align 4 + %38 = load %Array*, %Array** %16, align 8 + %39 = call i64 @__quantum__rt__array_get_size_1d(%Array* %38) + %40 = icmp slt i64 %37, %39 + br i1 %40, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__2 + %41 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %__qsVar0__gate__, i32 0, i32 1 + %pauli = load i2, i2* %41, align 1 + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 %37) + %43 = bitcast i8* %42 to double* + %theta = load double, double* %43, align 8 + %44 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %31, i32 0, i32 0 + %45 = load i64, i64* %44, align 4 + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %45) + %47 = bitcast i8* %46 to %Qubit** + %qubit = load %Qubit*, %Qubit** %47, align 8 + %48 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %__qsVar1__input__ = bitcast %Tuple* %48 to { i2, double, %Qubit* }* + %49 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %__qsVar1__input__, i32 0, i32 0 + %50 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %__qsVar1__input__, i32 0, i32 1 + %51 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %__qsVar1__input__, i32 0, i32 2 + store i2 %pauli, i2* %49, align 1 + store double %theta, double* %50, align 8 + store %Qubit* %qubit, %Qubit** %51, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %48, i32 1) + %52 = call i1 @Microsoft__Quantum__Arrays___cddb1db8090d4b2580514eb678e65fbd_IsEmpty__body(%Array* %33) + br i1 %52, label %then0__2, label %else__1 + +then0__2: ; preds = %then0__1 + call void @__quantum__qis__r__adj(i2 %pauli, double %theta, %Qubit* %qubit) + br label %continue__2 + +else__1: ; preds = %then0__1 + %__controlQubits__ = call %Array* @Microsoft__Quantum__Arrays___93b43ff3c247411b9de017f51b2344c9_Subarray__body(%Array* %33, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %53 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr 
({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %54 = bitcast %Tuple* %53 to { i2, double, %Qubit* }* + %55 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %54, i32 0, i32 0 + %56 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %54, i32 0, i32 1 + %57 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %54, i32 0, i32 2 + store i2 %pauli, i2* %55, align 1 + store double %theta, double* %56, align 8 + store %Qubit* %qubit, %Qubit** %57, align 8 + call void @__quantum__qis__r__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %54) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %then0__2 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %48, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %48, i32 -1) + br label %continue__1 + +continue__1: ; preds = %continue__2, %body__2 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %34, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %35, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %continue__1 + %58 = add i64 %26, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %59 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %60 = phi i64 [ 0, %exit__2 ], [ %71, %exiting__3 ] + %61 = icmp sle i64 %60, %59 + br i1 %61, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %60) + %63 = bitcast i8* %62 to { { i64, %Array* }*, i2, i64 }** + %64 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %63, align 8 + %65 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %64, i32 0, i32 0 + %66 = load { i64, %Array* }*, { i64, %Array* }** %65, align 8 + %67 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %66, i32 0, i32 1 + %68 = load %Array*, %Array** %67, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %68, i32 -1) + %69 = bitcast { i64, %Array* }* %66 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %69, i32 -1) + %70 = bitcast { { i64, %Array* }*, i2, i64 }* %64 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %70, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %71 = add i64 %60, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + %72 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__ctl(%Array* %__controlQubits__, { { %Array*, %Array*, double }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds 
{ { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %0, i32 0, i32 0 + %model = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %1, align 8 + %2 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + %4 = call i64 @__quantum__rt__array_get_size_1d(%Array* %3) + %5 = sub i64 %4, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %6 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %7 = icmp sle i64 %6, %5 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 %6) + %9 = bitcast i8* %8 to { { i64, %Array* }*, i2, i64 }** + %10 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %9, align 8 + %11 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %10, i32 0, i32 0 + %12 = load { i64, %Array* }*, { i64, %Array* }** %11, align 8 + %13 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %12, i32 0, i32 1 + %14 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + %15 = bitcast { i64, %Array* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = bitcast { { i64, %Array* }*, i2, i64 }* %10 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %6, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %18 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 1 + %19 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 1) + %20 = bitcast { %Array*, %Array*, double }* %model to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 1) + %21 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %22 = sub i64 %4, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %61, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 %23) + %26 = bitcast i8* %25 to { { i64, %Array* }*, i2, i64 }** + %gate = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %26, align 8 + %27 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %gate, i32 0, i32 0 + %28 = load { i64, %Array* }*, { i64, %Array* }** %27, align 8 + %29 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %28, i32 0, i32 1 + %30 = load %Array*, %Array** %29, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %30, i32 1) + %31 = bitcast { i64, %Array* }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %31, i32 1) + %32 = bitcast { { i64, %Array* }*, i2, i64 }* %gate to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %32, i32 1) + %33 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* 
}*, i2, i64 }* %gate, i32 0, i32 2 + %34 = load i64, i64* %33, align 4 + %35 = load %Array*, %Array** %18, align 8 + %36 = call i64 @__quantum__rt__array_get_size_1d(%Array* %35) + %37 = icmp slt i64 %34, %36 + br i1 %37, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__2 + %38 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %gate, i32 0, i32 1 + %pauli = load i2, i2* %38, align 1 + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %35, i64 %34) + %40 = bitcast i8* %39 to double* + %theta = load double, double* %40, align 8 + %41 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %28, i32 0, i32 0 + %42 = load i64, i64* %41, align 4 + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %42) + %44 = bitcast i8* %43 to %Qubit** + %qubit = load %Qubit*, %Qubit** %44, align 8 + %45 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %input = bitcast %Tuple* %45 to { i2, double, %Qubit* }* + %46 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %input, i32 0, i32 0 + %47 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %input, i32 0, i32 1 + %48 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %input, i32 0, i32 2 + store i2 %pauli, i2* %46, align 1 + store double %theta, double* %47, align 8 + store %Qubit* %qubit, %Qubit** %48, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 1) + %49 = call i1 @Microsoft__Quantum__Arrays___cddb1db8090d4b2580514eb678e65fbd_IsEmpty__body(%Array* %30) + br i1 %49, label %then0__2, label %else__1 + +then0__2: ; preds = %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %50 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %51 = bitcast %Tuple* %50 to { i2, double, %Qubit* }* + %52 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %51, i32 0, i32 0 + %53 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %51, i32 0, i32 1 + %54 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %51, i32 0, i32 2 + store i2 %pauli, i2* %52, align 1 + store double %theta, double* %53, align 8 + store %Qubit* %qubit, %Qubit** %54, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %51) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %50, i32 -1) + br label %continue__2 + +else__1: ; preds = %then0__1 + %55 = call %Array* @Microsoft__Quantum__Arrays___93b43ff3c247411b9de017f51b2344c9_Subarray__body(%Array* %30, %Array* %qubits) + %__controlQubits__2 = call %Array* @__quantum__rt__array_concatenate(%Array* %__controlQubits__, %Array* %55) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__2, i32 1) + %56 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %57 = bitcast %Tuple* %56 to { i2, double, %Qubit* }* + %58 = getelementptr 
inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %57, i32 0, i32 0 + %59 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %57, i32 0, i32 1 + %60 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %57, i32 0, i32 2 + store i2 %pauli, i2* %58, align 1 + store double %theta, double* %59, align 8 + store %Qubit* %qubit, %Qubit** %60, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__2, { i2, double, %Qubit* }* %57) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__2, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %55, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__2, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %then0__2 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1) + br label %continue__1 + +continue__1: ; preds = %continue__2, %body__2 + call void @__quantum__rt__array_update_alias_count(%Array* %30, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %31, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %32, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %continue__1 + %61 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %62 = sub i64 %4, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %63 = phi i64 [ 0, %exit__2 ], [ %74, %exiting__3 ] + %64 = icmp sle i64 %63, %62 + br i1 %64, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 %63) + %66 = bitcast i8* %65 to { { i64, %Array* }*, i2, i64 }** + %67 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %66, align 8 + %68 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %67, i32 0, i32 0 + %69 = load { i64, %Array* }*, { i64, %Array* }** %68, align 8 + %70 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %69, i32 0, i32 1 + %71 = load %Array*, %Array** %70, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %71, i32 -1) + %72 = bitcast { i64, %Array* }* %69 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %72, i32 -1) + %73 = bitcast { { i64, %Array* }*, i2, i64 }* %67 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %73, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %74 = add i64 %63, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + %75 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %75, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__ctladj(%Array* %__controlQubits__, { { %Array*, %Array*, double }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, 
i32 1) + %1 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %0, i32 0, i32 0 + %model = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %1, align 8 + %2 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 0 + %3 = load %Array*, %Array** %2, align 8 + %4 = call i64 @__quantum__rt__array_get_size_1d(%Array* %3) + %5 = sub i64 %4, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %6 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %7 = icmp sle i64 %6, %5 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 %6) + %9 = bitcast i8* %8 to { { i64, %Array* }*, i2, i64 }** + %10 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %9, align 8 + %11 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %10, i32 0, i32 0 + %12 = load { i64, %Array* }*, { i64, %Array* }** %11, align 8 + %13 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %12, i32 0, i32 1 + %14 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + %15 = bitcast { i64, %Array* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = bitcast { { i64, %Array* }*, i2, i64 }* %10 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %6, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %18 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 1 + %19 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 1) + %20 = bitcast { %Array*, %Array*, double }* %model to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 1) + %21 = getelementptr inbounds { { %Array*, %Array*, double }*, %Array* }, { { %Array*, %Array*, double }*, %Array* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %22 = sub i64 %4, 1 + %23 = insertvalue %Range zeroinitializer, i64 %22, 0 + %24 = insertvalue %Range %23, i64 -1, 1 + %25 = insertvalue %Range %24, i64 0, 2 + %26 = call %Array* @__quantum__rt__array_slice_1d(%Array* %3, %Range %25, i1 true) + %27 = call i64 @__quantum__rt__array_get_size_1d(%Array* %26) + %28 = sub i64 %27, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %29 = phi i64 [ 0, %exit__1 ], [ %67, %exiting__2 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %26, i64 %29) + %32 = bitcast i8* %31 to { { i64, %Array* }*, i2, i64 }** + %__qsVar0__gate__ = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %32, align 8 + %33 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %__qsVar0__gate__, i32 0, i32 0 + %34 = load { i64, %Array* }*, { i64, %Array* }** %33, align 8 + %35 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %34, i32 0, i32 1 + %36 = load %Array*, %Array** %35, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %36, i32 1) + %37 = bitcast { i64, %Array* }* %34 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %37, i32 1) + %38 = bitcast { { i64, %Array* }*, i2, i64 }* %__qsVar0__gate__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %38, i32 1) + %39 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %__qsVar0__gate__, i32 0, i32 2 + %40 = load i64, i64* %39, align 4 + %41 = load %Array*, %Array** %18, align 8 + %42 = call i64 @__quantum__rt__array_get_size_1d(%Array* %41) + %43 = icmp slt i64 %40, %42 + br i1 %43, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__2 + %44 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %__qsVar0__gate__, i32 0, i32 1 + %pauli = load i2, i2* %44, align 1 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %41, i64 %40) + %46 = bitcast i8* %45 to double* + %theta = load double, double* %46, align 8 + %47 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %34, i32 0, i32 0 + %48 = load i64, i64* %47, align 4 + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %48) + %50 = bitcast i8* %49 to %Qubit** + %qubit = load %Qubit*, %Qubit** %50, align 8 + %51 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %__qsVar1__input__ = bitcast %Tuple* %51 to { i2, double, %Qubit* }* + %52 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %__qsVar1__input__, i32 0, i32 0 + %53 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %__qsVar1__input__, i32 0, i32 1 + %54 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %__qsVar1__input__, i32 0, i32 2 + store i2 %pauli, i2* %52, align 1 + store double %theta, double* %53, align 8 + store %Qubit* %qubit, %Qubit** %54, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %51, i32 1) + %55 = call i1 @Microsoft__Quantum__Arrays___cddb1db8090d4b2580514eb678e65fbd_IsEmpty__body(%Array* %36) + br i1 %55, label %then0__2, label %else__1 + +then0__2: ; preds = %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %56 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %57 = bitcast %Tuple* %56 to { i2, double, %Qubit* }* + %58 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %57, i32 0, i32 0 + %59 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %57, i32 0, i32 1 + %60 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %57, i32 0, i32 2 + store i2 %pauli, i2* %58, align 1 + store double %theta, double* %59, align 8 + store %Qubit* %qubit, %Qubit** %60, align 8 + call void @__quantum__qis__r__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %57) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 -1) + br label %continue__2 + +else__1: ; preds = %then0__1 + %61 = call %Array* @Microsoft__Quantum__Arrays___93b43ff3c247411b9de017f51b2344c9_Subarray__body(%Array* %36, %Array* %qubits) + %__controlQubits__2 = call %Array* 
@__quantum__rt__array_concatenate(%Array* %__controlQubits__, %Array* %61) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__2, i32 1) + %62 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %63 = bitcast %Tuple* %62 to { i2, double, %Qubit* }* + %64 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %63, i32 0, i32 0 + %65 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %63, i32 0, i32 1 + %66 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %63, i32 0, i32 2 + store i2 %pauli, i2* %64, align 1 + store double %theta, double* %65, align 8 + store %Qubit* %qubit, %Qubit** %66, align 8 + call void @__quantum__qis__r__ctladj(%Array* %__controlQubits__2, { i2, double, %Qubit* }* %63) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__2, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %61, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__2, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %62, i32 -1) + br label %continue__2 + +continue__2: ; preds = %else__1, %then0__2 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %51, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %51, i32 -1) + br label %continue__1 + +continue__1: ; preds = %continue__2, %body__2 + call void @__quantum__rt__array_update_alias_count(%Array* %36, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %37, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %38, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %continue__1 + %67 = add i64 %29, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %68 = sub i64 %4, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %69 = phi i64 [ 0, %exit__2 ], [ %80, %exiting__3 ] + %70 = icmp sle i64 %69, %68 + br i1 %70, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %71 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 %69) + %72 = bitcast i8* %71 to { { i64, %Array* }*, i2, i64 }** + %73 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %72, align 8 + %74 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %73, i32 0, i32 0 + %75 = load { i64, %Array* }*, { i64, %Array* }** %74, align 8 + %76 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %75, i32 0, i32 1 + %77 = load %Array*, %Array** %76, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %77, i32 -1) + %78 = bitcast { i64, %Array* }* %75 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 -1) + %79 = bitcast { { i64, %Array* }*, i2, i64 }* %73 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %80 = add i64 %69, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + %81 = load %Array*, %Array** %18, 
align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %26, i32 -1) + ret void +} + +define internal double @Microsoft__Quantum__MachineLearning____QsRef0__MisclassificationRate____body(%Array* %probabilities, %Array* %labels, double %bias) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %probabilities, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %labels, i32 1) + %proposedLabels = call %Array* @Microsoft__Quantum__MachineLearning__InferredLabels__body(double %bias, %Array* %probabilities) + call void @__quantum__rt__array_update_alias_count(%Array* %proposedLabels, i32 1) + %0 = call i64 @Microsoft__Quantum__MachineLearning__NMisclassifications__body(%Array* %proposedLabels, %Array* %labels) + %1 = sitofp i64 %0 to double + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %probabilities) + %3 = sitofp i64 %2 to double + %4 = fdiv double %1, %3 + call void @__quantum__rt__array_update_alias_count(%Array* %probabilities, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %labels, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %proposedLabels, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %proposedLabels, i32 -1) + ret double %4 +} + +define internal i64 @Microsoft__Quantum__MachineLearning__NMisclassifications__body(%Array* %proposed, %Array* %actual) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %proposed, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %actual, i32 1) + %0 = call %Array* @Microsoft__Quantum__MachineLearning__Misclassifications__body(%Array* %proposed, %Array* %actual) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + call void @__quantum__rt__array_update_alias_count(%Array* %proposed, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %actual, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + ret i64 %1 +} + +define internal %Array* @Microsoft__Quantum__MachineLearning____QsRef0__NegativeLocations____body(i64 %cNegative, %Array* %coefficients) { +entry: + %negLocs = alloca %Array*, align 8 + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + store %Array* %9, %Array** %negLocs, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 1) + %10 = call %Array* 
@Microsoft__Quantum__Arrays___0545950abc7941f6b22adfd20ed15024_Enumerated__body(%Array* %coefficients) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %10) + %12 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %32, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %13) + %16 = bitcast i8* %15 to { i64, { double, double }* }** + %17 = load { i64, { double, double }* }*, { i64, { double, double }* }** %16, align 8 + %18 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %17, i32 0, i32 0 + %idx = load i64, i64* %18, align 4 + %19 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %17, i32 0, i32 1 + %coefficient = load { double, double }*, { double, double }** %19, align 8 + %20 = bitcast { double, double }* %coefficient to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 1) + %21 = getelementptr inbounds { double, double }, { double, double }* %coefficient, i32 0, i32 1 + %22 = load double, double* %21, align 8 + %23 = call double @Microsoft__Quantum__Math__PI__body() + %24 = fsub double %22, %23 + %25 = call double @Microsoft__Quantum__Math__AbsD__body(double %24) + %26 = fcmp olt double %25, 1.000000e-09 + br i1 %26, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__2 + %27 = load %Array*, %Array** %negLocs, align 8 + %28 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 0) + %30 = bitcast i8* %29 to i64* + store i64 %idx, i64* %30, align 4 + %31 = call %Array* @__quantum__rt__array_concatenate(%Array* %27, %Array* %28) + call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %31, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + store %Array* %31, %Array** %negLocs, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__2 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %continue__1 + %32 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %33 = load %Array*, %Array** %negLocs, align 8 + %34 = call i64 @__quantum__rt__array_get_size_1d(%Array* %33) + %35 = icmp sgt i64 %34, %cNegative + br i1 %35, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__2 + %36 = sub i64 %cNegative, 1 + %37 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %36, 2 + %38 = call %Array* @__quantum__rt__array_slice_1d(%Array* %33, %Range %37, i1 true) + call void @__quantum__rt__array_update_reference_count(%Array* %38, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %38, i32 -1) + br label %condContinue__1 + +condFalse__1: ; preds = %exit__2 + call void @__quantum__rt__array_update_reference_count(%Array* %33, i32 1) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %39 = phi %Array* [ %38, %condTrue__1 ], [ %33, %condFalse__1 ] + %40 = sub i64 %0, 1 + br 
label %header__3 + +header__3: ; preds = %exiting__3, %condContinue__1 + %41 = phi i64 [ 0, %condContinue__1 ], [ %47, %exiting__3 ] + %42 = icmp sle i64 %41, %40 + br i1 %42, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %41) + %44 = bitcast i8* %43 to { double, double }** + %45 = load { double, double }*, { double, double }** %44, align 8 + %46 = bitcast { double, double }* %45 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %46, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %47 = add i64 %41, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 -1) + %48 = sub i64 %11, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %49 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %50 = icmp sle i64 %49, %48 + br i1 %50, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %49) + %52 = bitcast i8* %51 to { i64, { double, double }* }** + %53 = load { i64, { double, double }* }*, { i64, { double, double }* }** %52, align 8 + %54 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %53, i32 0, i32 1 + %55 = load { double, double }*, { double, double }** %54, align 8 + %56 = bitcast { double, double }* %55 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 -1) + %57 = bitcast { i64, { double, double }* }* %53 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %49, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %33, i32 -1) + ret %Array* %39 +} + +define internal %Array* @Microsoft__Quantum__Arrays___0545950abc7941f6b22adfd20ed15024_Enumerated__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Arrays___b8c470817e3c4d54a387b72f70fe0572___QsRef1__Identity____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %10 = call %Array* @Microsoft__Quantum__Arrays___18b22a2872974a209223a31f4af592ba_MappedByIndex__body(%Callable* %9, %Array* %array) + %11 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, 
%exit__1 ], [ %18, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %12) + %15 = bitcast i8* %14 to { double, double }** + %16 = load { double, double }*, { double, double }** %15, align 8 + %17 = bitcast { double, double }* %16 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %18 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + ret %Array* %10 +} + +define internal double @Microsoft__Quantum__Math__PI__body() { +entry: + ret double 0x400921FB54442D18 +} + +declare %Array* @__quantum__rt__array_slice_1d(%Array*, %Range, i1) + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__PrepareClassification____body(%Callable* %encoder, { %Array*, %Array*, double }* %model, %Array* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %encoder, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %encoder, i32 1) + %0 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { { i64, %Array* }*, i2, i64 }** + %8 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %7, align 8 + %9 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %8, i32 0, i32 0 + %10 = load { i64, %Array* }*, { i64, %Array* }** %9, align 8 + %11 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %10, i32 0, i32 1 + %12 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = bitcast { i64, %Array* }* %10 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %13, i32 1) + %14 = bitcast { { i64, %Array* }*, i2, i64 }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array*, %Array*, double }* %model to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %19 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %target) + %20 = bitcast { %Array* }* %19 to %Tuple* + call void @__quantum__rt__callable_invoke(%Callable* 
%encoder, %Tuple* %20, %Tuple* null) + call void @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__body({ %Array*, %Array*, double }* %model, %Array* %target) + %21 = getelementptr inbounds { %Array* }, { %Array* }* %19, i32 0, i32 0 + %22 = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %encoder, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %encoder, i32 -1) + %23 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %24 = phi i64 [ 0, %exit__1 ], [ %35, %exiting__2 ] + %25 = icmp sle i64 %24, %23 + br i1 %25, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %24) + %27 = bitcast i8* %26 to { { i64, %Array* }*, i2, i64 }** + %28 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %27, align 8 + %29 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %28, i32 0, i32 0 + %30 = load { i64, %Array* }*, { i64, %Array* }** %29, align 8 + %31 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %30, i32 0, i32 1 + %32 = load %Array*, %Array** %31, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = bitcast { i64, %Array* }* %30 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %33, i32 -1) + %34 = bitcast { { i64, %Array* }*, i2, i64 }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %34, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %35 = add i64 %24, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %22, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__PrepareClassification____adj(%Callable* %encoder, { %Array*, %Array*, double }* %model, %Array* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %encoder, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %encoder, i32 1) + %0 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { { i64, %Array* }*, i2, i64 }** + %8 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %7, align 8 + %9 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %8, i32 0, i32 0 + %10 = load { i64, %Array* }*, { i64, %Array* }** %9, align 8 + %11 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %10, i32 0, i32 1 + %12 = load %Array*, %Array** %11, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = bitcast { i64, %Array* }* %10 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %13, i32 1) + %14 = bitcast { { i64, %Array* }*, i2, i64 }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array*, %Array*, double }* %model to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + call void @Microsoft__Quantum__MachineLearning__ApplySequentialClassifier__adj({ %Array*, %Array*, double }* %model, %Array* %target) + %19 = call %Callable* @__quantum__rt__callable_copy(%Callable* %encoder, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %19) + %20 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %target) + %21 = bitcast { %Array* }* %20 to %Tuple* + call void @__quantum__rt__callable_invoke(%Callable* %19, %Tuple* %21, %Tuple* null) + %22 = getelementptr inbounds { %Array* }, { %Array* }* %20, i32 0, i32 0 + %23 = load %Array*, %Array** %22, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %encoder, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %encoder, i32 -1) + %24 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %25 = phi i64 [ 0, %exit__1 ], [ %36, %exiting__2 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %25) + %28 = bitcast i8* %27 to { { i64, %Array* }*, i2, i64 }** + %29 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %28, align 8 + %30 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %29, i32 0, i32 0 + %31 = load { i64, %Array* }*, { i64, %Array* }** %30, align 8 + %32 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %31, i32 0, i32 1 + %33 = load %Array*, %Array** %32, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 -1) + %34 = bitcast { i64, %Array* }* %31 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %34, i32 -1) + %35 = bitcast { { i64, %Array* }*, i2, i64 }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %35, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %36 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + ret void +} + +define internal i1 @Microsoft__Quantum__Random__DrawRandomBool__body(double %successProbability) { +entry: + %0 = call double @__quantum__qis__drawrandomdouble__body(double 0.000000e+00, double 1.000000e+00) + %1 = fcmp ole double %0, %successProbability + ret i1 %1 +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__ReflectAboutNegativeCoefficients____body(%Array* %negLocs, %Array* %coefficients, { %Array* }* %reg) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %negLocs, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = getelementptr inbounds { %Array* }, { %Array* }* %reg, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array* }* %reg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = call i64 @__quantum__rt__array_get_size_1d(%Array* %negLocs) + %13 = sub i64 %12, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %14 = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %15 = icmp sle i64 %14, %13 + br i1 %15, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %negLocs, i64 %14) + %17 = bitcast i8* %16 to i64* + %idxNegative = load i64, i64* %17, align 4 + call void @Microsoft__Quantum__Arithmetic__ReflectAboutInteger__body(i64 %idxNegative, { %Array* }* %reg) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %18 = add i64 %14, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %negLocs, i32 -1) + %19 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %20 = phi i64 [ 0, %exit__2 ], [ %26, %exiting__3 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %20) + %23 = bitcast i8* %22 to { double, double }** + %24 = load { double, double }*, { double, double }** %23, align 8 + %25 = bitcast { double, double }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %26 = add i64 %20, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Arithmetic__ReflectAboutInteger__body(i64 %index, { %Array* }* %reg) { +entry: + %0 = getelementptr inbounds { %Array* }, { %Array* }* %reg, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %reg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Callable* @Microsoft__Quantum__Canon___89057b82e6d84103ba7c024aa441bb1f_CControlledCA__body(%Callable* %3) + %5 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Logical__Not__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %7 = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %index, i64 %6) + %8 = call %Array* @Microsoft__Quantum__Arrays___b419e961477e441ea98f00f19ccb1574_Mapped__body(%Callable* %5, %Array* %7) + %9 = call %Array* @Microsoft__Quantum__Arrays___cc24b2dc7eb146c6a86121e0aab81fa7_Zipped__body(%Array* %8, %Array* %1) + call void @Microsoft__Quantum__Canon___19e349a447284c0584da8df665449ea7_ApplyToEachCA__body(%Callable* %4, %Array* %9) + %__controlQubits__ = call %Array* @Microsoft__Quantum__Arrays___dfadd79ae46c48ca8fb83bba76dc75a9_Most__body(%Array* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %qubit = call %Qubit* @Microsoft__Quantum__Arrays___ec2282f4e2d04e26843a4c1b80ee157c_Tail__body(%Array* %1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %11 = call %Callable* @Microsoft__Quantum__Canon___89057b82e6d84103ba7c024aa441bb1f_CControlledCA__body(%Callable* %10) + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Logical__Not__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %13 = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %index, i64 %6) + %14 = call %Array* @Microsoft__Quantum__Arrays___b419e961477e441ea98f00f19ccb1574_Mapped__body(%Callable* %12, %Array* %13) + %15 = call %Array* @Microsoft__Quantum__Arrays___cc24b2dc7eb146c6a86121e0aab81fa7_Zipped__body(%Array* %14, %Array* %1) + call void @Microsoft__Quantum__Canon___19e349a447284c0584da8df665449ea7_ApplyToEachCA__adj(%Callable* %11, %Array* %15) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + 
call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %5, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %5, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 -1) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %9) + %17 = sub i64 %16, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %18 = phi i64 [ 0, %entry ], [ %24, %exiting__1 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %9, i64 %18) + %21 = bitcast i8* %20 to { i1, %Qubit* }** + %22 = load { i1, %Qubit* }*, { i1, %Qubit* }** %21, align 8 + %23 = bitcast { i1, %Qubit* }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %24 = add i64 %18, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 -1) + %25 = call i64 @__quantum__rt__array_get_size_1d(%Array* %15) + %26 = sub i64 %25, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %27 = phi i64 [ 0, %exit__1 ], [ %33, %exiting__2 ] + %28 = icmp sle i64 %27, %26 + br i1 %28, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %27) + %30 = bitcast i8* %29 to { i1, %Qubit* }** + %31 = load { i1, %Qubit* }*, { i1, %Qubit* }** %30, align 8 + %32 = bitcast { i1, %Qubit* }* %31 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %33 = add i64 %27, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__ReflectAboutNegativeCoefficients____adj(%Array* %negLocs, %Array* %coefficients, { %Array* }* %reg) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %negLocs, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double 
}* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = getelementptr inbounds { %Array* }, { %Array* }* %reg, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array* }* %reg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = call i64 @__quantum__rt__array_get_size_1d(%Array* %negLocs) + %13 = sub i64 %12, 1 + %14 = insertvalue %Range zeroinitializer, i64 %13, 0 + %15 = insertvalue %Range %14, i64 -1, 1 + %16 = insertvalue %Range %15, i64 0, 2 + %17 = call %Array* @__quantum__rt__array_slice_1d(%Array* %negLocs, %Range %16, i1 true) + %18 = call i64 @__quantum__rt__array_get_size_1d(%Array* %17) + %19 = sub i64 %18, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %20 = phi i64 [ 0, %exit__1 ], [ %24, %exiting__2 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %20) + %23 = bitcast i8* %22 to i64* + %__qsVar0__idxNegative__ = load i64, i64* %23, align 4 + call void @Microsoft__Quantum__Arithmetic__ReflectAboutInteger__adj(i64 %__qsVar0__idxNegative__, { %Array* }* %reg) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %24 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %negLocs, i32 -1) + %25 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %26 = phi i64 [ 0, %exit__2 ], [ %32, %exiting__3 ] + %27 = icmp sle i64 %26, %25 + br i1 %27, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %26) + %29 = bitcast i8* %28 to { double, double }** + %30 = load { double, double }*, { double, double }** %29, align 8 + %31 = bitcast { double, double }* %30 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %31, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %32 = add i64 %26, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Arithmetic__ReflectAboutInteger__adj(i64 %index, { %Array* }* %reg) { +entry: + %0 = getelementptr inbounds { %Array* }, { %Array* }* %reg, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %reg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Callable* @Microsoft__Quantum__Canon___89057b82e6d84103ba7c024aa441bb1f_CControlledCA__body(%Callable* %3) + %5 = call 
%Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Logical__Not__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %7 = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %index, i64 %6) + %8 = call %Array* @Microsoft__Quantum__Arrays___b419e961477e441ea98f00f19ccb1574_Mapped__body(%Callable* %5, %Array* %7) + %9 = call %Array* @Microsoft__Quantum__Arrays___cc24b2dc7eb146c6a86121e0aab81fa7_Zipped__body(%Array* %8, %Array* %1) + call void @Microsoft__Quantum__Canon___19e349a447284c0584da8df665449ea7_ApplyToEachCA__body(%Callable* %4, %Array* %9) + %__controlQubits__ = call %Array* @Microsoft__Quantum__Arrays___dfadd79ae46c48ca8fb83bba76dc75a9_Most__body(%Array* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %qubit = call %Qubit* @Microsoft__Quantum__Arrays___ec2282f4e2d04e26843a4c1b80ee157c_Tail__body(%Array* %1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %11 = call %Callable* @Microsoft__Quantum__Canon___89057b82e6d84103ba7c024aa441bb1f_CControlledCA__body(%Callable* %10) + %12 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Logical__Not__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %13 = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %index, i64 %6) + %14 = call %Array* @Microsoft__Quantum__Arrays___b419e961477e441ea98f00f19ccb1574_Mapped__body(%Callable* %12, %Array* %13) + %15 = call %Array* @Microsoft__Quantum__Arrays___cc24b2dc7eb146c6a86121e0aab81fa7_Zipped__body(%Array* %14, %Array* %1) + call void @Microsoft__Quantum__Canon___19e349a447284c0584da8df665449ea7_ApplyToEachCA__adj(%Callable* %11, %Array* %15) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %5, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %5, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 -1) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %9) + %17 = sub i64 %16, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %18 = phi i64 [ 0, %entry ], [ %24, %exiting__1 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %9, i64 %18) + %21 = bitcast i8* %20 to { i1, %Qubit* }** + %22 = load { i1, %Qubit* }*, { i1, %Qubit* }** %21, align 8 + %23 = 
bitcast { i1, %Qubit* }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %24 = add i64 %18, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 -1) + %25 = call i64 @__quantum__rt__array_get_size_1d(%Array* %15) + %26 = sub i64 %25, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %27 = phi i64 [ 0, %exit__1 ], [ %33, %exiting__2 ] + %28 = icmp sle i64 %27, %26 + br i1 %28, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %27) + %30 = bitcast i8* %29 to { i1, %Qubit* }** + %31 = load { i1, %Qubit* }*, { i1, %Qubit* }** %30, align 8 + %32 = bitcast { i1, %Qubit* }* %31 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %33 = add i64 %27, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__ReflectAboutNegativeCoefficients____ctl(%Array* %__controlQubits__, { %Array*, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %negLocs = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %negLocs, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %5) + %8 = bitcast i8* %7 to { double, double }** + %9 = load { double, double }*, { double, double }** %8, align 8 + %10 = bitcast { double, double }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %12 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %reg = load { %Array* }*, { %Array* }** 
%12, align 8 + %13 = getelementptr inbounds { %Array* }, { %Array* }* %reg, i32 0, i32 0 + %14 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + %15 = bitcast { %Array* }* %reg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %negLocs) + %17 = sub i64 %16, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %18 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %negLocs, i64 %18) + %21 = bitcast i8* %20 to i64* + %idxNegative = load i64, i64* %21, align 4 + %22 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %22, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { %Array* }* }* getelementptr ({ i64, { %Array* }* }, { i64, { %Array* }* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { i64, { %Array* }* }* + %25 = getelementptr inbounds { i64, { %Array* }* }, { i64, { %Array* }* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { i64, { %Array* }* }, { i64, { %Array* }* }* %24, i32 0, i32 1 + store i64 %idxNegative, i64* %25, align 4 + store { %Array* }* %reg, { %Array* }** %26, align 8 + call void @Microsoft__Quantum__Arithmetic__ReflectAboutInteger__ctl(%Array* %__controlQubits__, { i64, { %Array* }* }* %24) + call void @__quantum__rt__array_update_reference_count(%Array* %22, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %18, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %negLocs, i32 -1) + %28 = sub i64 %3, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + %36 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %36, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Arithmetic__ReflectAboutInteger__ctl(%Array* %__controlQubits__, { i64, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, { %Array* }* }, { i64, { %Array* }* }* %0, i32 0, i32 0 + %index = load i64, i64* %1, align 4 + 
%2 = getelementptr inbounds { i64, { %Array* }* }, { i64, { %Array* }* }* %0, i32 0, i32 1 + %reg = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %reg, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %reg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %7 = call %Callable* @Microsoft__Quantum__Canon___89057b82e6d84103ba7c024aa441bb1f_CControlledCA__body(%Callable* %6) + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Logical__Not__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %9 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %10 = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %index, i64 %9) + %11 = call %Array* @Microsoft__Quantum__Arrays___b419e961477e441ea98f00f19ccb1574_Mapped__body(%Callable* %8, %Array* %10) + %12 = call %Array* @Microsoft__Quantum__Arrays___cc24b2dc7eb146c6a86121e0aab81fa7_Zipped__body(%Array* %11, %Array* %4) + call void @Microsoft__Quantum__Canon___19e349a447284c0584da8df665449ea7_ApplyToEachCA__body(%Callable* %7, %Array* %12) + %13 = call %Array* @Microsoft__Quantum__Arrays___dfadd79ae46c48ca8fb83bba76dc75a9_Most__body(%Array* %4) + %__controlQubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %__controlQubits__, %Array* %13) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 1) + %qubit = call %Qubit* @Microsoft__Quantum__Arrays___ec2282f4e2d04e26843a4c1b80ee157c_Tail__body(%Array* %4) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__1, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + %14 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %15 = call %Callable* @Microsoft__Quantum__Canon___89057b82e6d84103ba7c024aa441bb1f_CControlledCA__body(%Callable* %14) + %16 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Logical__Not__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %17 = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %index, i64 %9) + %18 = call %Array* @Microsoft__Quantum__Arrays___b419e961477e441ea98f00f19ccb1574_Mapped__body(%Callable* %16, %Array* %17) + %19 = call %Array* @Microsoft__Quantum__Arrays___cc24b2dc7eb146c6a86121e0aab81fa7_Zipped__body(%Array* %18, %Array* %4) + call void @Microsoft__Quantum__Canon___19e349a447284c0584da8df665449ea7_ApplyToEachCA__adj(%Callable* %15, %Array* %19) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* 
%4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + %20 = call i64 @__quantum__rt__array_get_size_1d(%Array* %12) + %21 = sub i64 %20, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %22 = phi i64 [ 0, %entry ], [ %28, %exiting__1 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %22) + %25 = bitcast i8* %24 to { i1, %Qubit* }** + %26 = load { i1, %Qubit* }*, { i1, %Qubit* }** %25, align 8 + %27 = bitcast { i1, %Qubit* }* %26 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %28 = add i64 %22, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %16, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 -1) + %29 = call i64 @__quantum__rt__array_get_size_1d(%Array* %19) + %30 = sub i64 %29, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %31 = phi i64 [ 0, %exit__1 ], [ %37, %exiting__2 ] + %32 = icmp sle i64 %31, %30 + br i1 %32, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 %31) + %34 = bitcast i8* %33 to { i1, %Qubit* }** + %35 = load { i1, %Qubit* }*, { i1, %Qubit* }** %34, align 8 + %36 = bitcast { i1, %Qubit* }* %35 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %37 = add i64 %31, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__ReflectAboutNegativeCoefficients____ctladj(%Array* %__controlQubits__, { %Array*, %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %negLocs = load %Array*, %Array** %1, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %negLocs, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %5) + %8 = bitcast i8* %7 to { double, double }** + %9 = load { double, double }*, { double, double }** %8, align 8 + %10 = bitcast { double, double }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %12 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %reg = load { %Array* }*, { %Array* }** %12, align 8 + %13 = getelementptr inbounds { %Array* }, { %Array* }* %reg, i32 0, i32 0 + %14 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + %15 = bitcast { %Array* }* %reg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %negLocs) + %17 = sub i64 %16, 1 + %18 = insertvalue %Range zeroinitializer, i64 %17, 0 + %19 = insertvalue %Range %18, i64 -1, 1 + %20 = insertvalue %Range %19, i64 0, 2 + %21 = call %Array* @__quantum__rt__array_slice_1d(%Array* %negLocs, %Range %20, i1 true) + %22 = call i64 @__quantum__rt__array_get_size_1d(%Array* %21) + %23 = sub i64 %22, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %24 = phi i64 [ 0, %exit__1 ], [ %33, %exiting__2 ] + %25 = icmp sle i64 %24, %23 + br i1 %25, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %24) + %27 = bitcast i8* %26 to i64* + %__qsVar0__idxNegative__ = load i64, i64* %27, align 4 + %28 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 1) + %29 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { %Array* }* }* getelementptr ({ i64, { %Array* }* }, { i64, { %Array* }* }* null, i32 1) to i64)) + %30 = bitcast %Tuple* %29 to { i64, { %Array* }* }* + %31 = getelementptr inbounds { i64, { %Array* }* }, { i64, { %Array* }* }* %30, i32 0, i32 0 + %32 = getelementptr inbounds { i64, { %Array* }* }, { i64, { %Array* }* }* %30, i32 0, i32 1 + store i64 %__qsVar0__idxNegative__, i64* %31, align 4 + store { %Array* }* %reg, { %Array* }** %32, align 8 + call void @Microsoft__Quantum__Arithmetic__ReflectAboutInteger__ctladj(%Array* %__controlQubits__, { i64, { %Array* }* }* %30) + call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %29, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %33 = add i64 %24, 1 + br 
label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %negLocs, i32 -1) + %34 = sub i64 %3, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %41, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %35) + %38 = bitcast i8* %37 to { double, double }** + %39 = load { double, double }*, { double, double }** %38, align 8 + %40 = bitcast { double, double }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %41 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + %42 = load %Array*, %Array** %13, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Arithmetic__ReflectAboutInteger__ctladj(%Array* %__controlQubits__, { i64, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i64, { %Array* }* }, { i64, { %Array* }* }* %0, i32 0, i32 0 + %index = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, { %Array* }* }, { i64, { %Array* }* }* %0, i32 0, i32 1 + %reg = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %reg, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %reg to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %7 = call %Callable* @Microsoft__Quantum__Canon___89057b82e6d84103ba7c024aa441bb1f_CControlledCA__body(%Callable* %6) + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Logical__Not__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %9 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %10 = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %index, i64 %9) + %11 = call %Array* @Microsoft__Quantum__Arrays___b419e961477e441ea98f00f19ccb1574_Mapped__body(%Callable* %8, %Array* %10) + %12 = call %Array* @Microsoft__Quantum__Arrays___cc24b2dc7eb146c6a86121e0aab81fa7_Zipped__body(%Array* %11, %Array* %4) + call void @Microsoft__Quantum__Canon___19e349a447284c0584da8df665449ea7_ApplyToEachCA__body(%Callable* %7, %Array* %12) + %13 = call %Array* @Microsoft__Quantum__Arrays___dfadd79ae46c48ca8fb83bba76dc75a9_Most__body(%Array* %4) + %__controlQubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %__controlQubits__, %Array* %13) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 
1) + %qubit = call %Qubit* @Microsoft__Quantum__Arrays___ec2282f4e2d04e26843a4c1b80ee157c_Tail__body(%Array* %4) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__1, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + %14 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__X__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %15 = call %Callable* @Microsoft__Quantum__Canon___89057b82e6d84103ba7c024aa441bb1f_CControlledCA__body(%Callable* %14) + %16 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Logical__Not__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %17 = call %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %index, i64 %9) + %18 = call %Array* @Microsoft__Quantum__Arrays___b419e961477e441ea98f00f19ccb1574_Mapped__body(%Callable* %16, %Array* %17) + %19 = call %Array* @Microsoft__Quantum__Arrays___cc24b2dc7eb146c6a86121e0aab81fa7_Zipped__body(%Array* %18, %Array* %4) + call void @Microsoft__Quantum__Canon___19e349a447284c0584da8df665449ea7_ApplyToEachCA__adj(%Callable* %15, %Array* %19) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + %20 = call i64 @__quantum__rt__array_get_size_1d(%Array* %12) + %21 = sub i64 %20, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %22 = phi i64 [ 0, %entry ], [ %28, %exiting__1 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %22) + %25 = bitcast i8* %24 to { i1, %Qubit* }** + %26 = load { i1, %Qubit* }*, { i1, %Qubit* }** %25, align 8 + %27 = bitcast { i1, %Qubit* }* %26 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %28 = add i64 %22, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %16, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 -1) + %29 = call i64 @__quantum__rt__array_get_size_1d(%Array* %19) + %30 = sub i64 %29, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %31 = phi i64 [ 0, %exit__1 ], [ %37, %exiting__2 ] + %32 = icmp sle i64 %31, %30 + br i1 %32, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 %31) + %34 = bitcast i8* %33 to { i1, %Qubit* }** + %35 = load { i1, %Qubit* }*, { i1, %Qubit* }** %34, align 8 + %36 = bitcast { i1, %Qubit* }* %35 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %37 = add i64 %31, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___9021424cc3274213b24cdef7f22a1dcc_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %5 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %4, align 8 + %6 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %5, i32 0, i32 0 + %7 = load { %Array*, i64 }*, { %Array*, i64 }** %6, align 8 + %8 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %7, i32 0, i32 0 + %9 = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 1) + %10 = bitcast { %Array*, i64 }* %7 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + %11 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %5, i32 0, i32 1 + %12 = load { i64, %Callable* }*, { i64, %Callable* }** %11, align 8 + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 1 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %14, i32 1) + %15 = bitcast { i64, %Callable* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 
%1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %18 = icmp eq i64 %length, 0 + br i1 %18, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %19 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %20 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %22 = bitcast i8* %21 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %23 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %22, align 8 + %24 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %23 to %Tuple* + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, i64 }* }* getelementptr ({ { %Array*, i64 }* }, { { %Array*, i64 }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %24, %Tuple* %25) + %26 = bitcast %Tuple* %25 to { { %Array*, i64 }* }* + %27 = getelementptr inbounds { { %Array*, i64 }* }, { { %Array*, i64 }* }* %26, i32 0, i32 0 + %first = load { %Array*, i64 }*, { %Array*, i64 }** %27, align 8 + %28 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %first, i32 0, i32 0 + %29 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 1) + %30 = bitcast { %Array*, i64 }* %first to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %30, i32 1) + %31 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %32 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %33 = phi i64 [ 0, %then0__1 ], [ %49, %exiting__2 ] + %34 = icmp sle i64 %33, %20 + br i1 %34, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %33) + %36 = bitcast i8* %35 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %37 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %36, align 8 + %38 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %37, i32 0, i32 0 + %39 = load { %Array*, i64 }*, { %Array*, i64 }** %38, align 8 + %40 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %39, i32 0, i32 0 + %41 = load %Array*, %Array** %40, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 -1) + %42 = bitcast { %Array*, i64 }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 -1) + %43 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %37, i32 0, i32 1 + %44 = load { i64, %Callable* }*, { i64, %Callable* }** %43, align 8 + %45 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %44, i32 0, i32 1 + %46 = load %Callable*, %Callable** %45, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %46, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %46, i32 -1) + %47 = bitcast { i64, %Callable* }* %44 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + %48 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %37 
to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %48, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %49 = add i64 %33, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %19 + +header__3: ; preds = %exiting__3, %continue__1 + %50 = phi i64 [ 0, %continue__1 ], [ %55, %exiting__3 ] + %51 = icmp sle i64 %50, %32 + br i1 %51, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %31, i64 %50) + %53 = bitcast i8* %52 to { %Array*, i64 }** + store { %Array*, i64 }* %first, { %Array*, i64 }** %53, align 8 + %54 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %54, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %55 = add i64 %50, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %31, %Array** %retval, align 8 + %56 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %57 = phi i64 [ 0, %exit__3 ], [ %65, %exiting__4 ] + %58 = icmp sle i64 %57, %56 + br i1 %58, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %31, i64 %57) + %60 = bitcast i8* %59 to { %Array*, i64 }** + %61 = load { %Array*, i64 }*, { %Array*, i64 }** %60, align 8 + %62 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %61, i32 0, i32 0 + %63 = load %Array*, %Array** %62, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %63, i32 1) + %64 = bitcast { %Array*, i64 }* %61 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %64, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %65 = add i64 %57, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %31, i32 1) + %66 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idx = phi i64 [ 1, %exit__4 ], [ %87, %exiting__5 ] + %67 = icmp sle i64 %idx, %66 + br i1 %67, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %68 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %68, i32 -1) + %69 = call %Array* @__quantum__rt__array_copy(%Array* %68, i1 false) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %71 = bitcast i8* %70 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %72 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %71, align 8 + %73 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %72 to %Tuple* + %74 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, i64 }* }* getelementptr ({ { %Array*, i64 }* }, { { %Array*, i64 }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %73, %Tuple* %74) + %75 = bitcast %Tuple* %74 to { { %Array*, i64 }* }* + %76 = getelementptr inbounds { { %Array*, i64 }* }, { { %Array*, i64 }* }* %75, i32 0, i32 0 + %77 = load { %Array*, i64 }*, { %Array*, i64 }** %76, align 8 + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 %idx) + %79 = bitcast i8* %78 to { %Array*, i64 }** + %80 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %77, 
i32 0, i32 0 + %81 = load %Array*, %Array** %80, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 1) + %82 = bitcast { %Array*, i64 }* %77 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %82, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %82, i32 1) + %83 = load { %Array*, i64 }*, { %Array*, i64 }** %79, align 8 + %84 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %83, i32 0, i32 0 + %85 = load %Array*, %Array** %84, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %85, i32 -1) + %86 = bitcast { %Array*, i64 }* %83 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %85, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %86, i32 -1) + store { %Array*, i64 }* %77, { %Array*, i64 }** %79, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %69, i32 1) + store %Array* %69, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %68, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %82, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %74, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %87 = add i64 %idx, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %88 = load %Array*, %Array** %retval, align 8 + %89 = load %Array*, %Array** %28, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %90 = sub i64 %length, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %91 = phi i64 [ 0, %exit__5 ], [ %107, %exiting__6 ] + %92 = icmp sle i64 %91, %90 + br i1 %92, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %91) + %94 = bitcast i8* %93 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %95 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %94, align 8 + %96 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %95, i32 0, i32 0 + %97 = load { %Array*, i64 }*, { %Array*, i64 }** %96, align 8 + %98 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %97, i32 0, i32 0 + %99 = load %Array*, %Array** %98, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %99, i32 -1) + %100 = bitcast { %Array*, i64 }* %97 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %100, i32 -1) + %101 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %95, i32 0, i32 1 + %102 = load { i64, %Callable* }*, { i64, %Callable* }** %101, align 8 + %103 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %102, i32 0, i32 1 + %104 = load %Callable*, %Callable** %103, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %104, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %104, i32 -1) + %105 = bitcast { i64, %Callable* }* %102 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %105, i32 -1) + 
%106 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %95 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %106, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %107 = add i64 %91, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %89, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %30, i32 -1) + %108 = call i64 @__quantum__rt__array_get_size_1d(%Array* %88) + %109 = sub i64 %108, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %110 = phi i64 [ 0, %exit__6 ], [ %118, %exiting__7 ] + %111 = icmp sle i64 %110, %109 + br i1 %111, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 %110) + %113 = bitcast i8* %112 to { %Array*, i64 }** + %114 = load { %Array*, i64 }*, { %Array*, i64 }** %113, align 8 + %115 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %114, i32 0, i32 0 + %116 = load %Array*, %Array** %115, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %116, i32 -1) + %117 = bitcast { %Array*, i64 }* %114 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %117, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %118 = add i64 %110, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %88, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %89, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + ret %Array* %88 +} + +define internal void @Microsoft__Quantum__Canon___1f5badf5e91544c8bbff3b59164a3bb0_Fst__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { %Array*, i64 }*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %3 = load { %Array*, i64 }*, { %Array*, i64 }** %1, align 8 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %5 = call { %Array*, i64 }* @Microsoft__Quantum__Canon___1f5badf5e91544c8bbff3b59164a3bb0_Fst__body({ %Array*, i64 }* %3, { i64, %Callable* }* %4) + %6 = bitcast %Tuple* %result-tuple to { { %Array*, i64 }* }* + %7 = getelementptr inbounds { { %Array*, i64 }* }, { { %Array*, i64 }* }* %6, i32 0, i32 0 + store { %Array*, i64 }* %5, { %Array*, i64 }** %7, align 8 + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___01f2a4172bae4fb9a526206ae21b7f8f_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = 
call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %5 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %4, align 8 + %6 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %5, i32 0, i32 0 + %7 = load { %Array*, i64 }*, { %Array*, i64 }** %6, align 8 + %8 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %7, i32 0, i32 0 + %9 = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 1) + %10 = bitcast { %Array*, i64 }* %7 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + %11 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %5, i32 0, i32 1 + %12 = load { i64, %Callable* }*, { i64, %Callable* }** %11, align 8 + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 1 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %14, i32 1) + %15 = bitcast { i64, %Callable* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %18 = icmp eq i64 %length, 0 + br i1 %18, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %19 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %20 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %22 = bitcast i8* %21 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %23 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %22, align 8 + %24 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %23 to %Tuple* + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }* }* getelementptr ({ { i64, %Callable* }* }, { { i64, %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %24, %Tuple* %25) + %26 = bitcast %Tuple* %25 to { { i64, %Callable* }* }* + %27 = getelementptr inbounds { { i64, %Callable* }* }, { { i64, %Callable* }* }* %26, i32 0, i32 0 + %first = load { i64, %Callable* }*, { i64, %Callable* }** %27, align 8 + %28 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %first, i32 0, i32 1 + %29 = load %Callable*, %Callable** %28, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %29, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %29, i32 1) + %30 = bitcast { i64, %Callable* }* %first to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %30, i32 1) + %31 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %32 = 
sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %33 = phi i64 [ 0, %then0__1 ], [ %49, %exiting__2 ] + %34 = icmp sle i64 %33, %20 + br i1 %34, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %33) + %36 = bitcast i8* %35 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %37 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %36, align 8 + %38 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %37, i32 0, i32 0 + %39 = load { %Array*, i64 }*, { %Array*, i64 }** %38, align 8 + %40 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %39, i32 0, i32 0 + %41 = load %Array*, %Array** %40, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 -1) + %42 = bitcast { %Array*, i64 }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 -1) + %43 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %37, i32 0, i32 1 + %44 = load { i64, %Callable* }*, { i64, %Callable* }** %43, align 8 + %45 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %44, i32 0, i32 1 + %46 = load %Callable*, %Callable** %45, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %46, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %46, i32 -1) + %47 = bitcast { i64, %Callable* }* %44 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + %48 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %37 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %48, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %49 = add i64 %33, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %19 + +header__3: ; preds = %exiting__3, %continue__1 + %50 = phi i64 [ 0, %continue__1 ], [ %55, %exiting__3 ] + %51 = icmp sle i64 %50, %32 + br i1 %51, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %31, i64 %50) + %53 = bitcast i8* %52 to { i64, %Callable* }** + store { i64, %Callable* }* %first, { i64, %Callable* }** %53, align 8 + %54 = load %Callable*, %Callable** %28, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %54, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %54, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %55 = add i64 %50, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %31, %Array** %retval, align 8 + %56 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %57 = phi i64 [ 0, %exit__3 ], [ %65, %exiting__4 ] + %58 = icmp sle i64 %57, %56 + br i1 %58, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %59 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %31, i64 %57) + %60 = bitcast i8* %59 to { i64, %Callable* }** + %61 = load { i64, %Callable* }*, { i64, %Callable* }** %60, align 8 + %62 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %61, i32 0, i32 1 + %63 = load %Callable*, %Callable** %62, align 8 
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %63, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %63, i32 1) + %64 = bitcast { i64, %Callable* }* %61 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %64, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %65 = add i64 %57, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %31, i32 1) + %66 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idx = phi i64 [ 1, %exit__4 ], [ %87, %exiting__5 ] + %67 = icmp sle i64 %idx, %66 + br i1 %67, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %68 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %68, i32 -1) + %69 = call %Array* @__quantum__rt__array_copy(%Array* %68, i1 false) + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %71 = bitcast i8* %70 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %72 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %71, align 8 + %73 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %72 to %Tuple* + %74 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { i64, %Callable* }* }* getelementptr ({ { i64, %Callable* }* }, { { i64, %Callable* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %73, %Tuple* %74) + %75 = bitcast %Tuple* %74 to { { i64, %Callable* }* }* + %76 = getelementptr inbounds { { i64, %Callable* }* }, { { i64, %Callable* }* }* %75, i32 0, i32 0 + %77 = load { i64, %Callable* }*, { i64, %Callable* }** %76, align 8 + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 %idx) + %79 = bitcast i8* %78 to { i64, %Callable* }** + %80 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %77, i32 0, i32 1 + %81 = load %Callable*, %Callable** %80, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %81, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %81, i32 1) + %82 = bitcast { i64, %Callable* }* %77 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %82, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %81, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %81, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %82, i32 1) + %83 = load { i64, %Callable* }*, { i64, %Callable* }** %79, align 8 + %84 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %83, i32 0, i32 1 + %85 = load %Callable*, %Callable** %84, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %85, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %85, i32 -1) + %86 = bitcast { i64, %Callable* }* %83 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %85, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %85, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %86, i32 -1) + store { i64, %Callable* }* %77, { i64, %Callable* }** %79, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %69, i32 1) + store %Array* %69, %Array** %retval, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %68, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %81, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %81, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %82, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %74, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %87 = add i64 %idx, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %88 = load %Array*, %Array** %retval, align 8 + %89 = load %Callable*, %Callable** %28, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %90 = sub i64 %length, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %91 = phi i64 [ 0, %exit__5 ], [ %107, %exiting__6 ] + %92 = icmp sle i64 %91, %90 + br i1 %92, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %93 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %91) + %94 = bitcast i8* %93 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %95 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %94, align 8 + %96 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %95, i32 0, i32 0 + %97 = load { %Array*, i64 }*, { %Array*, i64 }** %96, align 8 + %98 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %97, i32 0, i32 0 + %99 = load %Array*, %Array** %98, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %99, i32 -1) + %100 = bitcast { %Array*, i64 }* %97 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %100, i32 -1) + %101 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %95, i32 0, i32 1 + %102 = load { i64, %Callable* }*, { i64, %Callable* }** %101, align 8 + %103 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %102, i32 0, i32 1 + %104 = load %Callable*, %Callable** %103, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %104, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %104, i32 -1) + %105 = bitcast { i64, %Callable* }* %102 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %105, i32 -1) + %106 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %95 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %106, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %107 = add i64 %91, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %89, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %89, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %30, i32 -1) + %108 = call i64 @__quantum__rt__array_get_size_1d(%Array* %88) + %109 = sub i64 %108, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %110 = phi i64 [ 0, %exit__6 ], [ %118, %exiting__7 ] + %111 = icmp sle i64 %110, %109 + br i1 %111, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %88, i64 %110) + %113 = bitcast i8* %112 to { i64, %Callable* }** 
+ %114 = load { i64, %Callable* }*, { i64, %Callable* }** %113, align 8 + %115 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %114, i32 0, i32 1 + %116 = load %Callable*, %Callable** %115, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %116, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %116, i32 -1) + %117 = bitcast { i64, %Callable* }* %114 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %117, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %118 = add i64 %110, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %88, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %89, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %89, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + ret %Array* %88 +} + +define internal void @Microsoft__Quantum__Canon___e2a028c390684ab28246f52a0c3fbae9_Snd__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { { %Array*, i64 }*, { i64, %Callable* }* }* + %1 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %0, i32 0, i32 1 + %3 = load { %Array*, i64 }*, { %Array*, i64 }** %1, align 8 + %4 = load { i64, %Callable* }*, { i64, %Callable* }** %2, align 8 + %5 = call { i64, %Callable* }* @Microsoft__Quantum__Canon___e2a028c390684ab28246f52a0c3fbae9_Snd__body({ %Array*, i64 }* %3, { i64, %Callable* }* %4) + %6 = bitcast %Tuple* %result-tuple to { { i64, %Callable* }* }* + %7 = getelementptr inbounds { { i64, %Callable* }* }, { { i64, %Callable* }* }* %6, i32 0, i32 0 + store { i64, %Callable* }* %5, { i64, %Callable* }** %7, align 8 + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___f496bf5745d24576bdaad9599407fb79_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to %Array** + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %7 = icmp eq i64 %length, 0 + br i1 %7, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %9 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %11 = bitcast i8* %10 to %Array** + %12 = load %Array*, %Array** %11, align 8 + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Array* }* + %15 = getelementptr inbounds { %Array* }, { %Array* }* %14, i32 0, i32 0 + store %Array* %12, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %13, %Tuple* %16) + %17 = bitcast %Tuple* %16 to { %Array* }* + %18 = getelementptr inbounds { %Array* }, { %Array* }* %17, i32 0, i32 0 + %first = load %Array*, %Array** %18, align 8 + %19 = call i64 @__quantum__rt__array_get_size_1d(%Array* %first) + %20 = sub i64 %19, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %21 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__2 ] + %22 = icmp sle i64 %21, %9 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %21) + %24 = bitcast i8* %23 to %Array** + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %8 + +header__3: ; preds = %exiting__3, %continue__1 + %27 = phi i64 [ 0, %continue__1 ], [ %43, %exiting__3 ] + %28 = icmp sle i64 %27, %20 + br i1 %28, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %first, i64 %27) + %30 = bitcast i8* %29 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %31 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %30, align 8 + %32 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %31, i32 0, i32 0 + %33 = load { %Array*, i64 }*, { %Array*, i64 }** %32, align 8 + %34 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %33, i32 0, i32 0 + %35 = load %Array*, %Array** %34, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %35, i32 1) + %36 = bitcast { %Array*, i64 }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %36, i32 1) + %37 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %31, i32 0, i32 1 + %38 = load { i64, %Callable* }*, { i64, %Callable* }** %37, align 8 + %39 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %38, i32 0, i32 1 + %40 = load %Callable*, %Callable** %39, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %40, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %40, i32 1) + %41 = bitcast { i64, %Callable* }* %38 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %41, i32 1) + %42 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %31 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %43 = add i64 %27, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %first, i32 1) + %44 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %45 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %46 = phi i64 [ 0, %exit__3 ], [ %51, %exiting__4 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 %46) + %49 = bitcast i8* %48 to %Array** + store %Array* %first, %Array** %49, align 8 + %50 = sub i64 %19, 1 + br label %header__5 + +exiting__4: ; preds = %exit__5 + %51 = add i64 %46, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + store %Array* %44, %Array** %retval, align 8 + %52 = sub i64 %length, 1 + br label %header__6 + +header__5: ; preds = %exiting__5, %body__4 + %53 = phi i64 [ 0, %body__4 ], [ %69, %exiting__5 ] + %54 = icmp sle i64 %53, %50 + br i1 %54, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %first, i64 %53) + %56 = bitcast i8* %55 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %57 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %56, align 8 + %58 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %57, i32 0, i32 0 + %59 = load { %Array*, i64 }*, { %Array*, i64 }** %58, align 8 + %60 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %59, i32 0, i32 0 + %61 = load %Array*, %Array** %60, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %61, i32 1) + %62 = bitcast { %Array*, i64 }* %59 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %62, i32 1) + %63 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %57, i32 0, i32 1 + %64 = load { i64, %Callable* }*, { i64, %Callable* }** %63, align 8 + %65 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %64, i32 0, i32 1 + %66 = load %Callable*, %Callable** %65, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %66, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %66, i32 1) + %67 = bitcast { i64, %Callable* }* %64 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %67, i32 1) + %68 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %57 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %68, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %69 = add i64 %53, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %first, i32 1) + br label %exiting__4 + +header__6: ; preds = %exiting__6, %exit__4 + %70 = phi i64 [ 0, %exit__4 ], [ %77, %exiting__6 ] + %71 = icmp sle i64 %70, %52 + br i1 %71, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %72 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 %70) + %73 = bitcast i8* %72 to %Array** + %74 = load %Array*, %Array** %73, align 8 + %75 = call i64 @__quantum__rt__array_get_size_1d(%Array* %74) + %76 = sub i64 %75, 1 + br label %header__7 + 
+exiting__6: ; preds = %exit__7 + %77 = add i64 %70, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 1) + %78 = sub i64 %length, 1 + br label %header__8 + +header__7: ; preds = %exiting__7, %body__6 + %79 = phi i64 [ 0, %body__6 ], [ %95, %exiting__7 ] + %80 = icmp sle i64 %79, %76 + br i1 %80, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %81 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %74, i64 %79) + %82 = bitcast i8* %81 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %83 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %82, align 8 + %84 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %83, i32 0, i32 0 + %85 = load { %Array*, i64 }*, { %Array*, i64 }** %84, align 8 + %86 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %85, i32 0, i32 0 + %87 = load %Array*, %Array** %86, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %87, i32 1) + %88 = bitcast { %Array*, i64 }* %85 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %88, i32 1) + %89 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %83, i32 0, i32 1 + %90 = load { i64, %Callable* }*, { i64, %Callable* }** %89, align 8 + %91 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %90, i32 0, i32 1 + %92 = load %Callable*, %Callable** %91, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %92, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %92, i32 1) + %93 = bitcast { i64, %Callable* }* %90 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %93, i32 1) + %94 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %83 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %94, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %95 = add i64 %79, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %74, i32 1) + br label %exiting__6 + +header__8: ; preds = %exiting__8, %exit__6 + %idx = phi i64 [ 1, %exit__6 ], [ %113, %exiting__8 ] + %96 = icmp sle i64 %idx, %78 + br i1 %96, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %97 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %97, i32 -1) + %98 = call %Array* @__quantum__rt__array_copy(%Array* %97, i1 false) + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %100 = bitcast i8* %99 to %Array** + %101 = load %Array*, %Array** %100, align 8 + %102 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %103 = bitcast %Tuple* %102 to { %Array* }* + %104 = getelementptr inbounds { %Array* }, { %Array* }* %103, i32 0, i32 0 + store %Array* %101, %Array** %104, align 8 + %105 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %102, %Tuple* %105) + %106 = bitcast %Tuple* %105 to { %Array* }* + %107 = getelementptr inbounds { %Array* }, { %Array* }* %106, i32 0, i32 0 + %108 = load %Array*, %Array** %107, align 8 + %109 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %98, i64 %idx) + %110 = bitcast i8* %109 to %Array** + %111 = call i64 @__quantum__rt__array_get_size_1d(%Array* %108) + %112 = sub i64 %111, 1 + br label %header__9 + +exiting__8: ; preds = %exit__13 + %113 = add i64 %idx, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + %114 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %115 = sub i64 %length, 1 + br label %header__14 + +header__9: ; preds = %exiting__9, %body__8 + %116 = phi i64 [ 0, %body__8 ], [ %132, %exiting__9 ] + %117 = icmp sle i64 %116, %112 + br i1 %117, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %108, i64 %116) + %119 = bitcast i8* %118 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %120 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %119, align 8 + %121 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %120, i32 0, i32 0 + %122 = load { %Array*, i64 }*, { %Array*, i64 }** %121, align 8 + %123 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %122, i32 0, i32 0 + %124 = load %Array*, %Array** %123, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %124, i32 1) + %125 = bitcast { %Array*, i64 }* %122 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %125, i32 1) + %126 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %120, i32 0, i32 1 + %127 = load { i64, %Callable* }*, { i64, %Callable* }** %126, align 8 + %128 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %127, i32 0, i32 1 + %129 = load %Callable*, %Callable** %128, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %129, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %129, i32 1) + %130 = bitcast { i64, %Callable* }* %127 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %130, i32 1) + %131 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %120 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %131, i32 1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %132 = add i64 %116, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %108, i32 1) + %133 = sub i64 %111, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %134 = phi i64 [ 0, %exit__9 ], [ %150, %exiting__10 ] + %135 = icmp sle i64 %134, %133 + br i1 %135, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %136 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %108, i64 %134) + %137 = bitcast i8* %136 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %138 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %137, align 8 + %139 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %138, i32 0, i32 0 + %140 = load { %Array*, i64 }*, { %Array*, i64 }** %139, align 8 + %141 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %140, i32 0, i32 0 + %142 = load %Array*, %Array** %141, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %142, i32 1) + %143 = bitcast { %Array*, i64 }* %140 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %143, i32 1) + %144 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %138, i32 0, i32 1 + %145 = load { i64, %Callable* }*, { i64, %Callable* }** %144, align 8 + %146 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %145, i32 0, i32 1 + %147 = load %Callable*, %Callable** %146, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %147, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %147, i32 1) + %148 = bitcast { i64, %Callable* }* %145 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %148, i32 1) + %149 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %138 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %149, i32 1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %150 = add i64 %134, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_reference_count(%Array* %108, i32 1) + %151 = load %Array*, %Array** %110, align 8 + %152 = call i64 @__quantum__rt__array_get_size_1d(%Array* %151) + %153 = sub i64 %152, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %154 = phi i64 [ 0, %exit__10 ], [ %170, %exiting__11 ] + %155 = icmp sle i64 %154, %153 + br i1 %155, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %156 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %151, i64 %154) + %157 = bitcast i8* %156 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %158 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %157, align 8 + %159 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %158, i32 0, i32 0 + %160 = load { %Array*, i64 }*, { %Array*, i64 }** %159, align 8 + %161 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %160, i32 0, i32 0 + %162 = load %Array*, %Array** %161, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %162, i32 -1) + %163 = bitcast { %Array*, i64 }* %160 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %163, i32 -1) + %164 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %158, i32 0, i32 1 + %165 = load { i64, %Callable* }*, { i64, %Callable* }** %164, align 8 + %166 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %165, i32 0, i32 1 + %167 = load %Callable*, %Callable** %166, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %167, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %167, i32 -1) + %168 = bitcast { i64, %Callable* }* %165 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %168, i32 -1) + %169 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %158 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %169, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %170 = add i64 %154, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %151, i32 -1) + %171 = sub i64 %152, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %172 = 
phi i64 [ 0, %exit__11 ], [ %188, %exiting__12 ] + %173 = icmp sle i64 %172, %171 + br i1 %173, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %174 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %151, i64 %172) + %175 = bitcast i8* %174 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %176 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %175, align 8 + %177 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %176, i32 0, i32 0 + %178 = load { %Array*, i64 }*, { %Array*, i64 }** %177, align 8 + %179 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %178, i32 0, i32 0 + %180 = load %Array*, %Array** %179, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %180, i32 -1) + %181 = bitcast { %Array*, i64 }* %178 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %181, i32 -1) + %182 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %176, i32 0, i32 1 + %183 = load { i64, %Callable* }*, { i64, %Callable* }** %182, align 8 + %184 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %183, i32 0, i32 1 + %185 = load %Callable*, %Callable** %184, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %185, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %185, i32 -1) + %186 = bitcast { i64, %Callable* }* %183 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %186, i32 -1) + %187 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %176 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %187, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %188 = add i64 %172, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_reference_count(%Array* %151, i32 -1) + store %Array* %108, %Array** %110, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %98, i32 1) + store %Array* %98, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %97, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %102, i32 -1) + %189 = sub i64 %111, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %190 = phi i64 [ 0, %exit__12 ], [ %206, %exiting__13 ] + %191 = icmp sle i64 %190, %189 + br i1 %191, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %192 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %108, i64 %190) + %193 = bitcast i8* %192 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %194 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %193, align 8 + %195 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %194, i32 0, i32 0 + %196 = load { %Array*, i64 }*, { %Array*, i64 }** %195, align 8 + %197 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %196, i32 0, i32 0 + %198 = load %Array*, %Array** %197, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %198, i32 -1) + %199 = bitcast { %Array*, i64 }* %196 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %199, i32 -1) + %200 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, 
%Callable* }* }* %194, i32 0, i32 1 + %201 = load { i64, %Callable* }*, { i64, %Callable* }** %200, align 8 + %202 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %201, i32 0, i32 1 + %203 = load %Callable*, %Callable** %202, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %203, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %203, i32 -1) + %204 = bitcast { i64, %Callable* }* %201 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %204, i32 -1) + %205 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %194 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %205, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %206 = add i64 %190, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_reference_count(%Array* %108, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %105, i32 -1) + br label %exiting__8 + +header__14: ; preds = %exiting__14, %exit__8 + %207 = phi i64 [ 0, %exit__8 ], [ %212, %exiting__14 ] + %208 = icmp sle i64 %207, %115 + br i1 %208, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %209 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %207) + %210 = bitcast i8* %209 to %Array** + %211 = load %Array*, %Array** %210, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %211, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %212 = add i64 %207, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + %213 = sub i64 %19, 1 + br label %header__15 + +header__15: ; preds = %exiting__15, %exit__14 + %214 = phi i64 [ 0, %exit__14 ], [ %230, %exiting__15 ] + %215 = icmp sle i64 %214, %213 + br i1 %215, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %216 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %first, i64 %214) + %217 = bitcast i8* %216 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %218 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %217, align 8 + %219 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %218, i32 0, i32 0 + %220 = load { %Array*, i64 }*, { %Array*, i64 }** %219, align 8 + %221 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %220, i32 0, i32 0 + %222 = load %Array*, %Array** %221, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %222, i32 -1) + %223 = bitcast { %Array*, i64 }* %220 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %223, i32 -1) + %224 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %218, i32 0, i32 1 + %225 = load { i64, %Callable* }*, { i64, %Callable* }** %224, align 8 + %226 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %225, i32 0, i32 1 + %227 = load %Callable*, %Callable** %226, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %227, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %227, i32 -1) + %228 = bitcast { i64, %Callable* }* %225 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %228, i32 -1) + %229 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %218 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %229, i32 -1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %230 = add i64 %214, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_alias_count(%Array* %first, i32 -1) + %231 = call i64 @__quantum__rt__array_get_size_1d(%Array* %114) + %232 = sub i64 %231, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %233 = phi i64 [ 0, %exit__15 ], [ %240, %exiting__16 ] + %234 = icmp sle i64 %233, %232 + br i1 %234, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %235 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %114, i64 %233) + %236 = bitcast i8* %235 to %Array** + %237 = load %Array*, %Array** %236, align 8 + %238 = call i64 @__quantum__rt__array_get_size_1d(%Array* %237) + %239 = sub i64 %238, 1 + br label %header__17 + +exiting__16: ; preds = %exit__17 + %240 = add i64 %233, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_alias_count(%Array* %114, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + %241 = sub i64 %19, 1 + br label %header__18 + +header__17: ; preds = %exiting__17, %body__16 + %242 = phi i64 [ 0, %body__16 ], [ %258, %exiting__17 ] + %243 = icmp sle i64 %242, %239 + br i1 %243, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %244 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %237, i64 %242) + %245 = bitcast i8* %244 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %246 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %245, align 8 + %247 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %246, i32 0, i32 0 + %248 = load { %Array*, i64 }*, { %Array*, i64 }** %247, align 8 + %249 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %248, i32 0, i32 0 + %250 = load %Array*, %Array** %249, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %250, i32 -1) + %251 = bitcast { %Array*, i64 }* %248 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %251, i32 -1) + %252 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %246, i32 0, i32 1 + %253 = load { i64, %Callable* }*, { i64, %Callable* }** %252, align 8 + %254 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %253, i32 0, i32 1 + %255 = load %Callable*, %Callable** %254, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %255, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %255, i32 -1) + %256 = bitcast { i64, %Callable* }* %253 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %256, i32 -1) + %257 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %246 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %257, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %258 = add i64 %242, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %237, i32 -1) + br label %exiting__16 + +header__18: ; preds = %exiting__18, %exit__16 + %259 = phi i64 [ 0, %exit__16 ], [ %275, %exiting__18 ] + %260 = icmp sle i64 %259, %241 + br i1 %260, label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %261 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %first, i64 %259) + %262 = bitcast i8* %261 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %263 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %262, align 8 + %264 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %263, i32 0, i32 0 + %265 = load { %Array*, i64 }*, { %Array*, i64 }** %264, align 8 + %266 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %265, i32 0, i32 0 + %267 = load %Array*, %Array** %266, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %267, i32 -1) + %268 = bitcast { %Array*, i64 }* %265 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %268, i32 -1) + %269 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %263, i32 0, i32 1 + %270 = load { i64, %Callable* }*, { i64, %Callable* }** %269, align 8 + %271 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %270, i32 0, i32 1 + %272 = load %Callable*, %Callable** %271, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %272, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %272, i32 -1) + %273 = bitcast { i64, %Callable* }* %270 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %273, i32 -1) + %274 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %263 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %274, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = %body__18 + %275 = add i64 %259, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_reference_count(%Array* %first, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret %Array* %114 +} + +define internal void @Lifted__PartialApplication__7__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array* }* + %1 = getelementptr inbounds { %Array* }, { %Array* }* %0, i32 0, i32 0 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %4 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %3, i32 0, i32 1 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %3, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Arrays___8a3dda3255e547b68a0799da4c61f944_Subarray__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr 
inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = call %Array* @Microsoft__Quantum__Arrays___8a3dda3255e547b68a0799da4c61f944_Subarray__body(%Array* %3, %Array* %4) + %6 = bitcast %Tuple* %result-tuple to { %Array* }* + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + store %Array* %5, %Array** %7, align 8 + ret void +} + +define internal void @MemoryManagement__6__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %23, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %11 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %10, align 8 + %12 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %11, i32 0, i32 0 + %13 = load { %Array*, i64 }*, { %Array*, i64 }** %12, align 8 + %14 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %13, i32 0, i32 0 + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 %count-change) + %16 = bitcast { %Array*, i64 }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 %count-change) + %17 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %11, i32 0, i32 1 + %18 = load { i64, %Callable* }*, { i64, %Callable* }** %17, align 8 + %19 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %18, i32 0, i32 1 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %20, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %20, i32 %count-change) + %21 = bitcast { i64, %Callable* }* %18 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 %count-change) + %22 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %23 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void 
@MemoryManagement__6__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %23, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %11 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %10, align 8 + %12 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %11, i32 0, i32 0 + %13 = load { %Array*, i64 }*, { %Array*, i64 }** %12, align 8 + %14 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %13, i32 0, i32 0 + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 %count-change) + %16 = bitcast { %Array*, i64 }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 %count-change) + %17 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %11, i32 0, i32 1 + %18 = load { i64, %Callable* }*, { i64, %Callable* }** %17, align 8 + %19 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %18, i32 0, i32 1 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %20, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %20, i32 %count-change) + %21 = bitcast { i64, %Callable* }* %18 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 %count-change) + %22 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %23 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___924d100a2fcc4f628511e045bf39e089_Chunks__body(i64 %nElements, %Array* %arr) { +entry: + %remaining = alloca %Array*, align 8 + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %arr, i32 1) + %0 = icmp sgt i64 %nElements, 0 + %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([27 x i8], [27 x i8]* @20, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %0, %String* %1) + %2 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + store %Array* %2, %Array** %output, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %2, i32 1) + store %Array* %arr, %Array** %remaining, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %arr, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %arr, i32 1) + br label %while__1 + +while__1: ; preds = %exit__5, %entry + %3 = load %Array*, %Array** %remaining, align 8 + %4 = call i1 @Microsoft__Quantum__Arrays___cddb1db8090d4b2580514eb678e65fbd_IsEmpty__body(%Array* %3) + %5 = xor i1 %4, true + br i1 %5, label %do__1, label %wend__1 + +do__1: ; preds = %while__1 + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %3) + %nElementsToTake = call i64 @Microsoft__Quantum__Math__MinI__body(i64 %6, i64 %nElements) + %7 = load %Array*, %Array** %output, align 8 + %8 = sub i64 %nElementsToTake, 1 + %9 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %8, 2 + %10 = call %Array* @__quantum__rt__array_slice_1d(%Array* %3, %Range %9, i1 true) + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %10, i32 -1) + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %11, i64 0) + %13 = bitcast i8* %12 to %Array** + store %Array* %10, %Array** %13, align 8 + %14 = call %Array* @__quantum__rt__array_concatenate(%Array* %7, %Array* %11) + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %14) + %16 = sub i64 %15, 1 + br label %header__1 + +wend__1: ; preds = %while__1 + %17 = load %Array*, %Array** %output, align 8 + %18 = load %Array*, %Array** %remaining, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %arr, i32 -1) + %19 = call i64 @__quantum__rt__array_get_size_1d(%Array* %17) + %20 = sub i64 %19, 1 + br label %header__6 + +header__1: ; preds = %exiting__1, %do__1 + %21 = phi i64 [ 0, %do__1 ], [ %26, %exiting__1 ] + %22 = icmp sle i64 %21, %16 + br i1 %22, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %14, i64 %21) + %24 = bitcast i8* %23 to %Array** + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %26 = add i64 %21, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 1) + %27 = sub i64 %15, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %28 = phi i64 [ 0, %exit__1 ], [ %33, %exiting__2 ] + %29 = icmp sle i64 %28, %27 + br i1 %29, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %14, i64 %28) + %31 = bitcast i8* %30 to %Array** + %32 = load %Array*, %Array** %31, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %33 = add i64 %28, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %14, i32 1) + %34 = call i64 @__quantum__rt__array_get_size_1d(%Array* %7) + %35 = sub i64 %34, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %36 = phi i64 [ 0, %exit__2 ], [ %41, %exiting__3 ] + %37 = icmp sle i64 %36, %35 + br i1 %37, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %38 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %7, i64 %36) + %39 = bitcast i8* %38 to %Array** + %40 = load %Array*, %Array** %39, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %40, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %41 = add i64 %36, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %7, i32 -1) + %42 = sub i64 %34, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %43 = phi i64 [ 0, %exit__3 ], [ %48, %exiting__4 ] + %44 = icmp sle i64 %43, %42 + br i1 %44, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %7, i64 %43) + %46 = bitcast i8* %45 to %Array** + %47 = load %Array*, %Array** %46, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %47, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %48 = add i64 %43, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %7, i32 -1) + store %Array* %14, %Array** %output, align 8 + %49 = sub i64 %6, 1 + %50 = insertvalue %Range zeroinitializer, i64 %nElementsToTake, 0 + %51 = insertvalue %Range %50, i64 1, 1 + %52 = insertvalue %Range %51, i64 %49, 2 + %53 = call %Array* @__quantum__rt__array_slice_1d(%Array* %3, %Range %52, i1 true) + call void @__quantum__rt__array_update_alias_count(%Array* %53, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + store %Array* %53, %Array** %remaining, align 8 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %54 = phi i64 [ 0, %exit__4 ], [ %59, %exiting__5 ] + %55 = icmp sle i64 %54, 0 + br i1 %55, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %11, i64 %54) + %57 = bitcast i8* %56 to %Array** + %58 = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %58, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %59 = add i64 %54, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %53, i32 -1) + br label %while__1 + +header__6: ; preds = %exiting__6, %wend__1 + %60 = phi i64 [ 0, %wend__1 ], [ %65, %exiting__6 ] + %61 = icmp sle i64 %60, %20 + br i1 %61, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %17, i64 %60) + %63 = bitcast i8* %62 to %Array** + %64 = load %Array*, %Array** %63, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %65 = add i64 %60, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 -1) + ret %Array* %17 +} + +define 
internal %Array* @Microsoft__Quantum__Arrays___bfbc686941de40cda88afed8ead5a62b_Enumerated__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to %Array** + %6 = load %Array*, %Array** %5, align 8 + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + br label %header__2 + +exiting__1: ; preds = %exit__2 + %9 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Arrays___9e4eb8c66a5d41c0ab661fccd1f15c41___QsRef1__Identity____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %11 = call %Array* @Microsoft__Quantum__Arrays___593f6ec0c6174564a8ee8add732e267d_MappedByIndex__body(%Callable* %10, %Array* %array) + %12 = sub i64 %0, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %body__1 + %13 = phi i64 [ 0, %body__1 ], [ %29, %exiting__2 ] + %14 = icmp sle i64 %13, %8 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %13) + %16 = bitcast i8* %15 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %17 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %16, align 8 + %18 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %17, i32 0, i32 0 + %19 = load { %Array*, i64 }*, { %Array*, i64 }** %18, align 8 + %20 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %19, i32 0, i32 0 + %21 = load %Array*, %Array** %20, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 1) + %22 = bitcast { %Array*, i64 }* %19 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 1) + %23 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %17, i32 0, i32 1 + %24 = load { i64, %Callable* }*, { i64, %Callable* }** %23, align 8 + %25 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %24, i32 0, i32 1 + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 1) + %27 = bitcast { i64, %Callable* }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %27, i32 1) + %28 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + br label %exiting__1 + +header__3: ; preds = %exiting__3, %exit__1 + %30 = phi i64 [ 0, %exit__1 ], [ %37, %exiting__3 ] + %31 = icmp sle i64 %30, %12 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %32 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %30) + %33 = bitcast i8* %32 to %Array** + %34 = load %Array*, %Array** %33, align 8 + %35 = call i64 @__quantum__rt__array_get_size_1d(%Array* %34) + %36 = sub i64 %35, 1 + br label %header__4 + +exiting__3: ; preds = %exit__4 + %37 = add i64 %30, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + ret %Array* %11 + +header__4: ; preds = %exiting__4, %body__3 + %38 = phi i64 [ 0, %body__3 ], [ %54, %exiting__4 ] + %39 = icmp sle i64 %38, %36 + br i1 %39, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %34, i64 %38) + %41 = bitcast i8* %40 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %42 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %41, align 8 + %43 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %42, i32 0, i32 0 + %44 = load { %Array*, i64 }*, { %Array*, i64 }** %43, align 8 + %45 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %44, i32 0, i32 0 + %46 = load %Array*, %Array** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %46, i32 -1) + %47 = bitcast { %Array*, i64 }* %44 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + %48 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %42, i32 0, i32 1 + %49 = load { i64, %Callable* }*, { i64, %Callable* }** %48, align 8 + %50 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %49, i32 0, i32 1 + %51 = load %Callable*, %Callable** %50, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %51, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %51, i32 -1) + %52 = bitcast { i64, %Callable* }* %49 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %52, i32 -1) + %53 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %53, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %54 = add i64 %38, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + br label %exiting__3 +} + +define internal { double, { %Array*, %Array*, double }* }* @Microsoft__Quantum__MachineLearning____QsRef0__RunSingleTrainingStep____body(%Array* %miniBatch, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, { %Array*, %Array*, double }* %model) { +entry: + %err = alloca double, align 8 + %batchGradient = alloca %Array*, align 8 + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %miniBatch) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %miniBatch, i64 %2) + %5 = bitcast i8* %4 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %6 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, 
i64 }*, { i64, %Callable* }* }** %5, align 8 + %7 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %6, i32 0, i32 0 + %8 = load { %Array*, i64 }*, { %Array*, i64 }** %7, align 8 + %9 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %8, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array*, i64 }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %6, i32 0, i32 1 + %13 = load { i64, %Callable* }*, { i64, %Callable* }** %12, align 8 + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1 + %15 = load %Callable*, %Callable** %14, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %15, i32 1) + %16 = bitcast { i64, %Callable* }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + %17 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %miniBatch, i32 1) + %19 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 8 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %20, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %20, i32 1) + %21 = bitcast { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 1) + %22 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 0 + %23 = load %Array*, %Array** %22, align 8 + %24 = call i64 @__quantum__rt__array_get_size_1d(%Array* %23) + %25 = sub i64 %24, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %26 = phi i64 [ 0, %exit__1 ], [ %37, %exiting__2 ] + %27 = icmp sle i64 %26, %25 + br i1 %27, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %26) + %29 = bitcast i8* %28 to { { i64, %Array* }*, i2, i64 }** + %30 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %29, align 8 + %31 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %30, i32 0, i32 0 + %32 = load { i64, %Array* }*, { i64, %Array* }** %31, align 8 + %33 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %32, i32 0, i32 1 + %34 = load %Array*, %Array** %33, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 1) + %35 = bitcast { i64, %Array* }* %32 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %35, i32 1) + %36 = bitcast { { i64, %Array* }*, i2, i64 }* %30 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %36, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %37 = add i64 %26, 1 + br label 
%header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %38 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 1 + %39 = load %Array*, %Array** %38, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %39, i32 1) + %40 = bitcast { %Array*, %Array*, double }* %model to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %41 = call i64 @__quantum__rt__array_get_size_1d(%Array* %39) + %42 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %41) + %43 = sub i64 %41, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %44 = phi i64 [ 0, %exit__2 ], [ %48, %exiting__3 ] + %45 = icmp sle i64 %44, %43 + br i1 %45, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %42, i64 %44) + %47 = bitcast i8* %46 to double* + store double 0.000000e+00, double* %47, align 8 + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %44, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %42, %Array** %batchGradient, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 1) + %49 = call %Array* @Microsoft__Quantum__Arrays___f9184e7d9d864e538f386e594c17e4c1_Enumerated__body(%Array* %miniBatch) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %86, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }** + %56 = load { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }*, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }** %55, align 8 + %57 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %56, i32 0, i32 0 + %idxSample = load i64, i64* %57, align 4 + %58 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %56, i32 0, i32 1 + %59 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %58, align 8 + %60 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %59, i32 0, i32 0 + %sample = load { %Array*, i64 }*, { %Array*, i64 }** %60, align 8 + %61 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %sample, i32 0, i32 0 + %62 = load %Array*, %Array** %61, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %62, i32 1) + %63 = bitcast { %Array*, i64 }* %sample to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %63, i32 1) + %64 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %59, i32 0, i32 1 + %stateGenerator = load { i64, %Callable* }*, { i64, %Callable* }** %64, align 8 + %65 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %stateGenerator, i32 0, i32 1 + %66 = load %Callable*, %Callable** %65, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %66, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %66, i32 1) + %67 = bitcast { i64, %Callable* }* %stateGenerator to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %67, i32 1) + %68 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %sample, i32 0, i32 1 + %69 = load i64, i64* %68, align 4 + %70 = sitofp i64 %69 to double + store double %70, double* %err, align 8 + %71 = fcmp olt double %70, 1.000000e+00 + br i1 %71, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__4 + store double -1.000000e+00, double* %err, align 8 + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__4 + %72 = load %Callable*, %Callable** %19, align 8 + %73 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([37 x i8], [37 x i8]* @7, i32 0, i32 0)) + %74 = call %String* @__quantum__rt__int_to_string(i64 %idxSample) + %75 = call %String* @__quantum__rt__string_concatenate(%String* %73, %String* %74) + call void @__quantum__rt__string_update_reference_count(%String* %73, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %74, i32 -1) + %76 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([4 x i8], [4 x i8]* @8, i32 0, i32 0)) + %77 = call %String* @__quantum__rt__string_concatenate(%String* %75, %String* %76) + call void @__quantum__rt__string_update_reference_count(%String* %75, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %76, i32 -1) + %78 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %String* }* getelementptr ({ %String* }, { %String* }* null, i32 1) to i64)) + %79 = bitcast %Tuple* %78 to { %String* }* + %80 = getelementptr inbounds { %String* }, { %String* }* %79, i32 0, i32 0 + store %String* %77, %String** %80, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %72, %Tuple* %78, %Tuple* null) + %81 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 3 + %82 = load i64, i64* %81, align 4 + %grad = call %Array* @Microsoft__Quantum__MachineLearning__EstimateGradient__body({ %Array*, %Array*, double }* %model, { i64, %Callable* }* %stateGenerator, i64 %82) + call void @__quantum__rt__array_update_alias_count(%Array* %grad, i32 1) + %83 = load %Array*, %Array** %38, align 8 + %84 = call i64 @__quantum__rt__array_get_size_1d(%Array* %83) + %85 = sub i64 %84, 1 + br label %header__5 + +exiting__4: ; preds = %exit__5 + %86 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + %87 = load %Array*, %Array** %batchGradient, align 8 + %88 = call double @Microsoft__Quantum__Math__SquaredNorm__body(%Array* %87) + %89 = call %Tuple* @__quantum__rt__tuple_copy(%Tuple* %40, i1 false) + %90 = bitcast %Tuple* %89 to { %Array*, %Array*, double }* + %91 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %90, i32 0, i32 1 + %92 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Math__PlusD__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %93 = load %Array*, %Array** %38, align 8 + %94 = call %Array* @Microsoft__Quantum__Arrays___1ff548eeaa7940ff923499697de8c6a5_Zipped__body(%Array* %93, %Array* %87) + %95 = call %Array* @Microsoft__Quantum__Arrays___e441983e8fb14fb091112edb0b0083f0_Mapped__body(%Callable* %92, %Array* %94) + store %Array* %95, %Array** %91, align 8 + %96 = getelementptr 
inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %90, i32 0, i32 0 + %97 = load %Array*, %Array** %96, align 8 + %98 = call i64 @__quantum__rt__array_get_size_1d(%Array* %97) + %99 = sub i64 %98, 1 + br label %header__6 + +header__5: ; preds = %exiting__5, %continue__1 + %ip = phi i64 [ 0, %continue__1 ], [ %117, %exiting__5 ] + %100 = icmp sle i64 %ip, %85 + br i1 %100, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %101 = load %Array*, %Array** %batchGradient, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %101, i32 -1) + %102 = call %Array* @__quantum__rt__array_copy(%Array* %101, i1 false) + %103 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %101, i64 %ip) + %104 = bitcast i8* %103 to double* + %105 = load double, double* %104, align 8 + %106 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %options, i32 0, i32 0 + %107 = load double, double* %106, align 8 + %108 = load double, double* %err, align 8 + %109 = fmul double %107, %108 + %110 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %grad, i64 %ip) + %111 = bitcast i8* %110 to double* + %112 = load double, double* %111, align 8 + %113 = fmul double %109, %112 + %114 = fadd double %105, %113 + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %102, i64 %ip) + %116 = bitcast i8* %115 to double* + store double %114, double* %116, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %102, i32 1) + store %Array* %102, %Array** %batchGradient, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %101, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %117 = add i64 %ip, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %62, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %63, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %66, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %66, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %67, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %grad, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %77, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %78, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %grad, i32 -1) + br label %exiting__4 + +header__6: ; preds = %exiting__6, %exit__4 + %118 = phi i64 [ 0, %exit__4 ], [ %129, %exiting__6 ] + %119 = icmp sle i64 %118, %99 + br i1 %119, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %120 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %97, i64 %118) + %121 = bitcast i8* %120 to { { i64, %Array* }*, i2, i64 }** + %122 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %121, align 8 + %123 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %122, i32 0, i32 0 + %124 = load { i64, %Array* }*, { i64, %Array* }** %123, align 8 + %125 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %124, i32 0, i32 1 + %126 = load %Array*, %Array** %125, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %126, i32 1) + %127 = bitcast { i64, %Array* }* %124 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %127, i32 1) + 
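; body__6 walks the copied model's first array field (loaded through %96/%97) and bumps + ; the reference count of each nested { { i64, %Array* }*, i2, i64 } entry, so the model + ; tuple assembled in exit__7 owns its contents independently of the input %model. + 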
%128 = bitcast { { i64, %Array* }*, i2, i64 }* %122 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %128, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %129 = add i64 %118, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_reference_count(%Array* %97, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %92, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %92, i32 -1) + %130 = call i64 @__quantum__rt__array_get_size_1d(%Array* %94) + %131 = sub i64 %130, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %132 = phi i64 [ 0, %exit__6 ], [ %138, %exiting__7 ] + %133 = icmp sle i64 %132, %131 + br i1 %133, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %134 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %94, i64 %132) + %135 = bitcast i8* %134 to { double, double }** + %136 = load { double, double }*, { double, double }** %135, align 8 + %137 = bitcast { double, double }* %136 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %137, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %138 = add i64 %132, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %94, i32 -1) + %139 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { %Array*, %Array*, double }* }* getelementptr ({ double, { %Array*, %Array*, double }* }, { double, { %Array*, %Array*, double }* }* null, i32 1) to i64)) + %140 = bitcast %Tuple* %139 to { double, { %Array*, %Array*, double }* }* + %141 = getelementptr inbounds { double, { %Array*, %Array*, double }* }, { double, { %Array*, %Array*, double }* }* %140, i32 0, i32 0 + %142 = getelementptr inbounds { double, { %Array*, %Array*, double }* }, { double, { %Array*, %Array*, double }* }* %140, i32 0, i32 1 + store double %88, double* %141, align 8 + store { %Array*, %Array*, double }* %90, { %Array*, %Array*, double }** %142, align 8 + %143 = sub i64 %0, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %144 = phi i64 [ 0, %exit__7 ], [ %160, %exiting__8 ] + %145 = icmp sle i64 %144, %143 + br i1 %145, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %miniBatch, i64 %144) + %147 = bitcast i8* %146 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %148 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %147, align 8 + %149 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %148, i32 0, i32 0 + %150 = load { %Array*, i64 }*, { %Array*, i64 }** %149, align 8 + %151 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %150, i32 0, i32 0 + %152 = load %Array*, %Array** %151, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %152, i32 -1) + %153 = bitcast { %Array*, i64 }* %150 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %153, i32 -1) + %154 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %148, i32 0, i32 1 + %155 = load { i64, %Callable* }*, { i64, %Callable* }** %154, align 8 + %156 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %155, i32 0, i32 1 + %157 = load %Callable*, %Callable** %156, align 8 + call 
void @__quantum__rt__capture_update_alias_count(%Callable* %157, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %157, i32 -1) + %158 = bitcast { i64, %Callable* }* %155 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %158, i32 -1) + %159 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %148 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %159, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %160 = add i64 %144, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %miniBatch, i32 -1) + %161 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %161, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %161, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1) + %162 = sub i64 %24, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %163 = phi i64 [ 0, %exit__8 ], [ %174, %exiting__9 ] + %164 = icmp sle i64 %163, %162 + br i1 %164, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %165 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %163) + %166 = bitcast i8* %165 to { { i64, %Array* }*, i2, i64 }** + %167 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %166, align 8 + %168 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %167, i32 0, i32 0 + %169 = load { i64, %Array* }*, { i64, %Array* }** %168, align 8 + %170 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %169, i32 0, i32 1 + %171 = load %Array*, %Array** %170, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %171, i32 -1) + %172 = bitcast { i64, %Array* }* %169 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %172, i32 -1) + %173 = bitcast { { i64, %Array* }*, i2, i64 }* %167 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %173, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %174 = add i64 %163, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %93, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %87, i32 -1) + %175 = sub i64 %50, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %176 = phi i64 [ 0, %exit__9 ], [ %195, %exiting__10 ] + %177 = icmp sle i64 %176, %175 + br i1 %177, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %178 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %176) + %179 = bitcast i8* %178 to { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }** + %180 = load { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }*, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }** %179, align 8 + %181 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %180, i32 0, i32 1 + %182 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %181, align 8 + %183 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %182, i32 0, i32 0 + %184 = load { %Array*, i64 }*, 
{ %Array*, i64 }** %183, align 8 + %185 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %184, i32 0, i32 0 + %186 = load %Array*, %Array** %185, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %186, i32 -1) + %187 = bitcast { %Array*, i64 }* %184 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %187, i32 -1) + %188 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %182, i32 0, i32 1 + %189 = load { i64, %Callable* }*, { i64, %Callable* }** %188, align 8 + %190 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %189, i32 0, i32 1 + %191 = load %Callable*, %Callable** %190, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %191, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %191, i32 -1) + %192 = bitcast { i64, %Callable* }* %189 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %192, i32 -1) + %193 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %182 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %193, i32 -1) + %194 = bitcast { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %180 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %194, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %195 = add i64 %176, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %87, i32 -1) + ret { double, { %Array*, %Array*, double }* }* %140 +} + +define internal { %Array*, i64 }* @Microsoft__Quantum__Canon___1f5badf5e91544c8bbff3b59164a3bb0_Fst__body({ %Array*, i64 }* %0, { i64, %Callable* }* %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, i64 }*, { i64, %Callable* }* }* getelementptr ({ { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* null, i32 1) to i64)) + %pair = bitcast %Tuple* %2 to { { %Array*, i64 }*, { i64, %Callable* }* }* + %3 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %pair, i32 0, i32 0 + %4 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %pair, i32 0, i32 1 + store { %Array*, i64 }* %0, { %Array*, i64 }** %3, align 8 + store { i64, %Callable* }* %1, { i64, %Callable* }** %4, align 8 + %5 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %0, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array*, i64 }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %1, i32 0, i32 1 + %9 = load %Callable*, %Callable** %8, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %9, i32 1) + %10 = bitcast { i64, %Callable* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + 
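; The capture/callable call pairs below follow the QIR runtime convention of tracking a + ; callable's capture tuple separately from the callable itself. Every alias-count + ; increment in this block has a matching decrement before the return; only the + ; reference-count bump on the returned tuple's contents survives the call. + 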
call void @__quantum__rt__capture_update_alias_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %9, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %9, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %9, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret { %Array*, i64 }* %0 +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__Canon___e2a028c390684ab28246f52a0c3fbae9_Snd__body({ %Array*, i64 }* %0, { i64, %Callable* }* %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { %Array*, i64 }*, { i64, %Callable* }* }* getelementptr ({ { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* null, i32 1) to i64)) + %pair = bitcast %Tuple* %2 to { { %Array*, i64 }*, { i64, %Callable* }* }* + %3 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %pair, i32 0, i32 0 + %4 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %pair, i32 0, i32 1 + store { %Array*, i64 }* %0, { %Array*, i64 }** %3, align 8 + store { i64, %Callable* }* %1, { i64, %Callable* }** %4, align 8 + %5 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %0, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array*, i64 }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %1, i32 0, i32 1 + %9 = load %Callable*, %Callable** %8, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %9, i32 1) + %10 = bitcast { i64, %Callable* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %9, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %9, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %9, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret { i64, %Callable* }* %1 +} + +define internal %Array* @Microsoft__Quantum__Arrays___8a3dda3255e547b68a0799da4c61f944_Subarray__body(%Array* %indices, %Array* %array) { +entry: + %sliced = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %6 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %5, align 8 + %7 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %6, i32 0, i32 0 + %8 = load { %Array*, i64 }*, { %Array*, i64 }** %7, align 8 + %9 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %8, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array*, i64 }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %6, i32 0, i32 1 + %13 = load { i64, %Callable* }*, { i64, %Callable* }** %12, align 8 + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1 + %15 = load %Callable*, %Callable** %14, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %15, i32 1) + %16 = bitcast { i64, %Callable* }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + %17 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %nSliced = call i64 @__quantum__rt__array_get_size_1d(%Array* %indices) + %19 = icmp eq i64 %nSliced, 0 + br i1 %19, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %20 = call 
%Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + %21 = sub i64 %0, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %23 = bitcast i8* %22 to i64* + %24 = load i64, i64* %23, align 4 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %24) + %26 = bitcast i8* %25 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %27 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %26, align 8 + %28 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nSliced) + %29 = sub i64 %nSliced, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %30 = phi i64 [ 0, %then0__1 ], [ %46, %exiting__2 ] + %31 = icmp sle i64 %30, %21 + br i1 %31, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %30) + %33 = bitcast i8* %32 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %34 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %33, align 8 + %35 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %34, i32 0, i32 0 + %36 = load { %Array*, i64 }*, { %Array*, i64 }** %35, align 8 + %37 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %36, i32 0, i32 0 + %38 = load %Array*, %Array** %37, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %38, i32 -1) + %39 = bitcast { %Array*, i64 }* %36 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %39, i32 -1) + %40 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %34, i32 0, i32 1 + %41 = load { i64, %Callable* }*, { i64, %Callable* }** %40, align 8 + %42 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %41, i32 0, i32 1 + %43 = load %Callable*, %Callable** %42, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %43, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %43, i32 -1) + %44 = bitcast { i64, %Callable* }* %41 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %44, i32 -1) + %45 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %34 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %46 = add i64 %30, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %20 + +header__3: ; preds = %exiting__3, %continue__1 + %47 = phi i64 [ 0, %continue__1 ], [ %62, %exiting__3 ] + %48 = icmp sle i64 %47, %29 + br i1 %48, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %49 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 %47) + %50 = bitcast i8* %49 to { { %Array*, i64 }*, { i64, %Callable* }* }** + store { { %Array*, i64 }*, { i64, %Callable* }* }* %27, { { %Array*, i64 }*, { i64, %Callable* }* }** %50, align 8 + %51 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %27, i32 0, i32 0 + %52 = load { %Array*, i64 }*, { %Array*, i64 }** %51, align 8 + %53 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %52, i32 0, 
i32 0 + %54 = load %Array*, %Array** %53, align 8 + %55 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %27, i32 0, i32 1 + %56 = load { i64, %Callable* }*, { i64, %Callable* }** %55, align 8 + %57 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %56, i32 0, i32 1 + %58 = load %Callable*, %Callable** %57, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %54, i32 1) + %59 = bitcast { %Array*, i64 }* %52 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %59, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %58, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %58, i32 1) + %60 = bitcast { i64, %Callable* }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %60, i32 1) + %61 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %61, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %62 = add i64 %47, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %28, %Array** %sliced, align 8 + %63 = sub i64 %nSliced, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %64 = phi i64 [ 0, %exit__3 ], [ %80, %exiting__4 ] + %65 = icmp sle i64 %64, %63 + br i1 %65, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %66 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 %64) + %67 = bitcast i8* %66 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %68 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %67, align 8 + %69 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %68, i32 0, i32 0 + %70 = load { %Array*, i64 }*, { %Array*, i64 }** %69, align 8 + %71 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %70, i32 0, i32 0 + %72 = load %Array*, %Array** %71, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 1) + %73 = bitcast { %Array*, i64 }* %70 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %73, i32 1) + %74 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %68, i32 0, i32 1 + %75 = load { i64, %Callable* }*, { i64, %Callable* }** %74, align 8 + %76 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %75, i32 0, i32 1 + %77 = load %Callable*, %Callable** %76, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %77, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %77, i32 1) + %78 = bitcast { i64, %Callable* }* %75 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 1) + %79 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %68 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %79, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %80 = add i64 %64, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + %81 = sub i64 %nSliced, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idx = phi i64 [ 1, %exit__4 ], [ %116, %exiting__5 ] + %82 = icmp sle i64 %idx, %81 + br i1 %82, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %83 = load 
%Array*, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %83, i32 -1) + %84 = call %Array* @__quantum__rt__array_copy(%Array* %83, i1 false) + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 %idx) + %86 = bitcast i8* %85 to i64* + %87 = load i64, i64* %86, align 4 + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %87) + %89 = bitcast i8* %88 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %90 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %89, align 8 + %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %84, i64 %idx) + %92 = bitcast i8* %91 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %93 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %90, i32 0, i32 0 + %94 = load { %Array*, i64 }*, { %Array*, i64 }** %93, align 8 + %95 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %94, i32 0, i32 0 + %96 = load %Array*, %Array** %95, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %96, i32 1) + %97 = bitcast { %Array*, i64 }* %94 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %97, i32 1) + %98 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %90, i32 0, i32 1 + %99 = load { i64, %Callable* }*, { i64, %Callable* }** %98, align 8 + %100 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %99, i32 0, i32 1 + %101 = load %Callable*, %Callable** %100, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %101, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %101, i32 1) + %102 = bitcast { i64, %Callable* }* %99 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %102, i32 1) + %103 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %90 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %103, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %96, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %97, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %101, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %101, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %102, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %103, i32 1) + %104 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %92, align 8 + %105 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %104, i32 0, i32 0 + %106 = load { %Array*, i64 }*, { %Array*, i64 }** %105, align 8 + %107 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %106, i32 0, i32 0 + %108 = load %Array*, %Array** %107, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %108, i32 -1) + %109 = bitcast { %Array*, i64 }* %106 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %109, i32 -1) + %110 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %104, i32 0, i32 1 + %111 = load { i64, %Callable* }*, { i64, %Callable* }** %110, align 8 + %112 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %111, i32 0, i32 1 + %113 = load %Callable*, 
%Callable** %112, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %113, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %113, i32 -1) + %114 = bitcast { i64, %Callable* }* %111 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %114, i32 -1) + %115 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %104 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %115, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %108, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %109, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %113, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %113, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %114, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %115, i32 -1) + store { { %Array*, i64 }*, { i64, %Callable* }* }* %90, { { %Array*, i64 }*, { i64, %Callable* }* }** %92, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %84, i32 1) + store %Array* %84, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %83, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %116 = add i64 %idx, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %117 = load %Array*, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + %118 = sub i64 %0, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %119 = phi i64 [ 0, %exit__5 ], [ %135, %exiting__6 ] + %120 = icmp sle i64 %119, %118 + br i1 %120, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %121 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %119) + %122 = bitcast i8* %121 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %123 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %122, align 8 + %124 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %123, i32 0, i32 0 + %125 = load { %Array*, i64 }*, { %Array*, i64 }** %124, align 8 + %126 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %125, i32 0, i32 0 + %127 = load %Array*, %Array** %126, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %127, i32 -1) + %128 = bitcast { %Array*, i64 }* %125 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %128, i32 -1) + %129 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %123, i32 0, i32 1 + %130 = load { i64, %Callable* }*, { i64, %Callable* }** %129, align 8 + %131 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %130, i32 0, i32 1 + %132 = load %Callable*, %Callable** %131, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %132, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %132, i32 -1) + %133 = bitcast { i64, %Callable* }* %130 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %133, i32 -1) + %134 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %123 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %134, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %135 = add i64 %119, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + 
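; (annotation) release the alias count taken on the input array at entry; the loop below does the same for each element of the sliced result before it is returned +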
call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + %136 = call i64 @__quantum__rt__array_get_size_1d(%Array* %117) + %137 = sub i64 %136, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %138 = phi i64 [ 0, %exit__6 ], [ %154, %exiting__7 ] + %139 = icmp sle i64 %138, %137 + br i1 %139, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %140 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %117, i64 %138) + %141 = bitcast i8* %140 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %142 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %141, align 8 + %143 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %142, i32 0, i32 0 + %144 = load { %Array*, i64 }*, { %Array*, i64 }** %143, align 8 + %145 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %144, i32 0, i32 0 + %146 = load %Array*, %Array** %145, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %146, i32 -1) + %147 = bitcast { %Array*, i64 }* %144 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %147, i32 -1) + %148 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %142, i32 0, i32 1 + %149 = load { i64, %Callable* }*, { i64, %Callable* }** %148, align 8 + %150 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %149, i32 0, i32 1 + %151 = load %Callable*, %Callable** %150, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %151, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %151, i32 -1) + %152 = bitcast { i64, %Callable* }* %149 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %152, i32 -1) + %153 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %142 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %153, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %154 = add i64 %138, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %117, i32 -1) + ret %Array* %117 +} + +define internal %Array* @Microsoft__Quantum__Arrays___f9184e7d9d864e538f386e594c17e4c1_Enumerated__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %18, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %6 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %5, align 8 + %7 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %6, i32 0, i32 0 + %8 = load { %Array*, i64 }*, { %Array*, i64 }** %7, align 8 + %9 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %8, i32 0, i32 0 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { %Array*, i64 }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* 
}, { { %Array*, i64 }*, { i64, %Callable* }* }* %6, i32 0, i32 1 + %13 = load { i64, %Callable* }*, { i64, %Callable* }** %12, align 8 + %14 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %13, i32 0, i32 1 + %15 = load %Callable*, %Callable** %14, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %15, i32 1) + %16 = bitcast { i64, %Callable* }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + %17 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %18 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %19 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Arrays___d0d4b543e4084f10a022319d0e6d7887___QsRef1__Identity____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %20 = call %Array* @Microsoft__Quantum__Arrays___4e18ab692bdc46809cf35e50e230ef2a_MappedByIndex__body(%Callable* %19, %Array* %array) + %21 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %22 = phi i64 [ 0, %exit__1 ], [ %38, %exiting__2 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %22) + %25 = bitcast i8* %24 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %26 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %25, align 8 + %27 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %26, i32 0, i32 0 + %28 = load { %Array*, i64 }*, { %Array*, i64 }** %27, align 8 + %29 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %28, i32 0, i32 0 + %30 = load %Array*, %Array** %29, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %30, i32 -1) + %31 = bitcast { %Array*, i64 }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %31, i32 -1) + %32 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %26, i32 0, i32 1 + %33 = load { i64, %Callable* }*, { i64, %Callable* }** %32, align 8 + %34 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %33, i32 0, i32 1 + %35 = load %Callable*, %Callable** %34, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %35, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %35, i32 -1) + %36 = bitcast { i64, %Callable* }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %36, i32 -1) + %37 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %26 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %37, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %38 = add i64 %22, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %19, i32 -1) + ret %Array* %20 +} + +define internal 
%Array* @Microsoft__Quantum__MachineLearning__EstimateGradient__body({ %Array*, %Array*, double }* %model, { i64, %Callable* }* %encodedInput, i64 %nMeasurements) { +entry: + %grad = alloca %Array*, align 8 + %0 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { { i64, %Array* }*, i2, i64 }** + %8 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %7, align 8 + %9 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %8, i32 0, i32 0 + %10 = load { i64, %Array* }*, { i64, %Array* }** %9, align 8 + %11 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %10, i32 0, i32 1 + %12 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = bitcast { i64, %Array* }* %10 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %13, i32 1) + %14 = bitcast { { i64, %Array* }*, i2, i64 }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array*, %Array*, double }* %model to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + %19 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %encodedInput, i32 0, i32 1 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %20, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %20, i32 1) + %21 = bitcast { i64, %Callable* }* %encodedInput to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 1) + %22 = call i64 @__quantum__rt__array_get_size_1d(%Array* %17) + %23 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %22) + %24 = sub i64 %22, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %25 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %25) + %28 = bitcast i8* %27 to double* + store double 0.000000e+00, double* %28, align 8 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + store %Array* %23, %Array** %grad, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %30 = call i64 @Microsoft__Quantum__MachineLearning__NQubitsRequired__body({ %Array*, %Array*, double }* %model) + %31 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %encodedInput, i32 0, i32 0 
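+ ; (annotation) the register width is the larger of the model's qubit requirement and the encoded input's qubit count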
+ %32 = load i64, i64* %31, align 4 + %nQubits = call i64 @Microsoft__Quantum__Math__MaxI__body(i64 %30, i64 %32) + %33 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %34 = phi i64 [ 0, %exit__2 ], [ %94, %exiting__3 ] + %35 = icmp sle i64 %34, %33 + br i1 %35, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %34) + %37 = bitcast i8* %36 to { { i64, %Array* }*, i2, i64 }** + %gate = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %37, align 8 + %38 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %gate, i32 0, i32 0 + %39 = load { i64, %Array* }*, { i64, %Array* }** %38, align 8 + %40 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %39, i32 0, i32 1 + %41 = load %Array*, %Array** %40, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 1) + %42 = bitcast { i64, %Array* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 1) + %43 = bitcast { { i64, %Array* }*, i2, i64 }* %gate to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 1) + %44 = load %Array*, %Array** %16, align 8 + %45 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %45, i64 0) + %47 = bitcast i8* %46 to double* + store double 0.000000e+00, double* %47, align 8 + %48 = call %Array* @__quantum__rt__array_concatenate(%Array* %44, %Array* %45) + call void @__quantum__rt__array_update_reference_count(%Array* %48, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %45, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %48, i32 -1) + %paramShift = call %Array* @__quantum__rt__array_copy(%Array* %48, i1 false) + %49 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %gate, i32 0, i32 2 + %50 = load i64, i64* %49, align 4 + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 %50) + %52 = bitcast i8* %51 to double* + %53 = load double, double* %52, align 8 + %54 = call double @Microsoft__Quantum__Math__PI__body() + %55 = fadd double %53, %54 + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paramShift, i64 %50) + %57 = bitcast i8* %56 to double* + store double %55, double* %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %paramShift, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paramShift, i32 1) + %58 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %59 = bitcast %Tuple* %58 to { %Array*, %Array* }* + %60 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %59, i32 0, i32 0 + %61 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %59, i32 0, i32 1 + store %Array* %44, %Array** %60, align 8 + store %Array* %paramShift, %Array** %61, align 8 + %newDer = call double @Microsoft__Quantum__MachineLearning____QsRef0__EstimateDerivativeWithParameterShift____body({ i64, %Callable* }* %encodedInput, { %Array*, %Array*, double }* %model, { %Array*, %Array* }* %59, i64 %nQubits, i64 %nMeasurements) + %62 = call i1 
@Microsoft__Quantum__Arrays___cddb1db8090d4b2580514eb678e65fbd_IsEmpty__body(%Array* %41) + br i1 %62, label %then0__1, label %else__1 + +then0__1: ; preds = %body__3 + %63 = load %Array*, %Array** %grad, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %63, i32 -1) + %64 = call %Array* @__quantum__rt__array_copy(%Array* %63, i1 false) + %65 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %63, i64 %50) + %66 = bitcast i8* %65 to double* + %67 = load double, double* %66, align 8 + %68 = fadd double %67, %newDer + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %64, i64 %50) + %70 = bitcast i8* %69 to double* + store double %68, double* %70, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %64, i32 1) + store %Array* %64, %Array** %grad, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %63, i32 -1) + br label %continue__1 + +else__1: ; preds = %body__3 + %controlledShift = call %Array* @__quantum__rt__array_copy(%Array* %paramShift, i1 false) + %71 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 %50) + %72 = bitcast i8* %71 to double* + %73 = load double, double* %72, align 8 + %74 = call double @Microsoft__Quantum__Math__PI__body() + %75 = fmul double 3.000000e+00, %74 + %76 = fadd double %73, %75 + %77 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %controlledShift, i64 %50) + %78 = bitcast i8* %77 to double* + store double %76, double* %78, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlledShift, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlledShift, i32 1) + %79 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %80 = bitcast %Tuple* %79 to { %Array*, %Array* }* + %81 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %80, i32 0, i32 0 + %82 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %80, i32 0, i32 1 + store %Array* %44, %Array** %81, align 8 + store %Array* %controlledShift, %Array** %82, align 8 + %newDer1 = call double @Microsoft__Quantum__MachineLearning____QsRef0__EstimateDerivativeWithParameterShift____body({ i64, %Callable* }* %encodedInput, { %Array*, %Array*, double }* %model, { %Array*, %Array* }* %80, i64 %nQubits, i64 %nMeasurements) + %83 = load %Array*, %Array** %grad, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %83, i32 -1) + %84 = call %Array* @__quantum__rt__array_copy(%Array* %83, i1 false) + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 %50) + %86 = bitcast i8* %85 to double* + %87 = load double, double* %86, align 8 + %88 = fsub double %newDer, %newDer1 + %89 = fmul double 5.000000e-01, %88 + %90 = fadd double %87, %89 + %91 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %84, i64 %50) + %92 = bitcast i8* %91 to double* + %93 = load double, double* %92, align 8 + store double %90, double* %92, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %84, i32 1) + store %Array* %84, %Array** %grad, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controlledShift, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlledShift, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %controlledShift, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %83, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %42, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paramShift, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %48, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paramShift, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paramShift, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %58, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %continue__1 + %94 = add i64 %34, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %95 = load %Array*, %Array** %grad, align 8 + %96 = sub i64 %2, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %97 = phi i64 [ 0, %exit__3 ], [ %108, %exiting__4 ] + %98 = icmp sle i64 %97, %96 + br i1 %98, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %99 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %97) + %100 = bitcast i8* %99 to { { i64, %Array* }*, i2, i64 }** + %101 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %100, align 8 + %102 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %101, i32 0, i32 0 + %103 = load { i64, %Array* }*, { i64, %Array* }** %102, align 8 + %104 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %103, i32 0, i32 1 + %105 = load %Array*, %Array** %104, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %105, i32 -1) + %106 = bitcast { i64, %Array* }* %103 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %106, i32 -1) + %107 = bitcast { { i64, %Array* }*, i2, i64 }* %101 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %107, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %108 = add i64 %97, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + %109 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %109, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %20, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %20, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %95, i32 -1) + ret %Array* %95 +} + +declare %Array* @__quantum__rt__array_copy(%Array*, i1) + +define internal double @Microsoft__Quantum__Math__SquaredNorm__body(%Array* %array) { +entry: + %ret = alloca double, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + store double 0.000000e+00, double* %ret, align 8 + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 
[ 0, %entry ], [ %9, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to double* + %element = load double, double* %5, align 8 + %6 = load double, double* %ret, align 8 + %7 = fmul double %element, %element + %8 = fadd double %6, %7 + store double %8, double* %ret, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %10 = load double, double* %ret, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret double %10 +} + +define internal %Array* @Microsoft__Quantum__Arrays___e441983e8fb14fb091112edb0b0083f0_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to { double, double }** + %5 = load { double, double }*, { double, double }** %4, align 8 + %6 = bitcast { double, double }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %8 = icmp eq i64 %length, 0 + br i1 %8, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %9 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %10 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %12 = bitcast i8* %11 to { double, double }** + %13 = load { double, double }*, { double, double }** %12, align 8 + %14 = bitcast { double, double }* %13 to %Tuple* + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %14, %Tuple* %15) + %16 = bitcast %Tuple* %15 to { double }* + %17 = getelementptr inbounds { double }, { double }* %16, i32 0, i32 0 + %first = load double, double* %17, align 8 + %18 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %19 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %20 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__2 ] + %21 = icmp sle i64 %20, %10 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %20) + %23 = bitcast i8* %22 to { double, double }** + %24 = load { double, double }*, { double, double }** %23, align 8 + %25 = bitcast { double, double }* %24 to %Tuple* + 
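; (annotation) cleanup loop for the empty-input path: un-alias each element tuple before exit__2 returns the empty array %9 +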
call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %9 + +header__3: ; preds = %exiting__3, %continue__1 + %27 = phi i64 [ 0, %continue__1 ], [ %31, %exiting__3 ] + %28 = icmp sle i64 %27, %19 + br i1 %28, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %27) + %30 = bitcast i8* %29 to double* + store double %first, double* %30, align 8 + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %31 = add i64 %27, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %18, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + %32 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %idx = phi i64 [ 1, %exit__3 ], [ %46, %exiting__4 ] + %33 = icmp sle i64 %idx, %32 + br i1 %33, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %34 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + %35 = call %Array* @__quantum__rt__array_copy(%Array* %34, i1 false) + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %37 = bitcast i8* %36 to { double, double }** + %38 = load { double, double }*, { double, double }** %37, align 8 + %39 = bitcast { double, double }* %38 to %Tuple* + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %39, %Tuple* %40) + %41 = bitcast %Tuple* %40 to { double }* + %42 = getelementptr inbounds { double }, { double }* %41, i32 0, i32 0 + %43 = load double, double* %42, align 8 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %35, i64 %idx) + %45 = bitcast i8* %44 to double* + store double %43, double* %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %35, i32 1) + store %Array* %35, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %46 = add i64 %idx, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + %47 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %48 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %49 = phi i64 [ 0, %exit__4 ], [ %55, %exiting__5 ] + %50 = icmp sle i64 %49, %48 + br i1 %50, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %49) + %52 = bitcast i8* %51 to { double, double }** + %53 = load { double, double }*, { double, double }** %52, align 8 + %54 = bitcast { double, double }* %53 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %54, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %55 = add i64 %49, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void 
@__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + ret %Array* %47 +} + +define internal void @Microsoft__Quantum__Math__PlusD__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, double }* + %1 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load double, double* %2, align 8 + %5 = call double @Microsoft__Quantum__Math__PlusD__body(double %3, double %4) + %6 = bitcast %Tuple* %result-tuple to { double }* + %7 = getelementptr inbounds { double }, { double }* %6, i32 0, i32 0 + store double %5, double* %7, align 8 + ret void +} + +define internal double @Microsoft__Quantum__Math__PlusD__body(double %a, double %b) { +entry: + %0 = fadd double %a, %b + ret double %0 +} + +define internal %Callable* @Microsoft__Quantum__MachineLearning____QsRef0__TailMeasurement____body(i64 %nQubits) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %nQubits) + %1 = sub i64 %nQubits, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2) + %5 = bitcast i8* %4 to i2* + store i2 0, i2* %5, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %paulis = call %Array* @__quantum__rt__array_copy(%Array* %0, i1 false) + %7 = sub i64 %nQubits, 1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 %7) + %9 = bitcast i8* %8 to i2* + store i2 -2, i2* %9, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Measure__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Callable*, %Array* }* + %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %12, i32 0, i32 1 + store %Callable* %10, %Callable** %13, align 8 + store %Array* %paulis, %Array** %14, align 8 + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__8__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__7__FunctionTable, %Tuple* %11) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + ret %Callable* %15 +} + +define internal void @Lifted__PartialApplication__8__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { 
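+; (annotation) prepends the captured Pauli-basis array to the qubit-array argument and forwards the pair to the captured callable (Measure)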
+entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Measure__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = call %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %3, %Array* %4) + %6 = bitcast %Tuple* %result-tuple to { %Result* }* + %7 = getelementptr inbounds { %Result* }, { %Result* }* %6, i32 0, i32 0 + store %Result* %5, %Result** %7, align 8 + ret void +} + +define internal void @MemoryManagement__7__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__7__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret %Result* %0 +} + +define internal %Array* @Microsoft__Quantum__MachineLearning____QsRef0__Unnegate____body(%Array* %negLocs, %Array* %coefficients) { +entry: + %ret = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %negLocs, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + store %Array* %coefficients, %Array** %ret, align 8 + %9 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %16, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %10) + %13 = bitcast i8* %12 to { double, double }** + %14 = load { double, double }*, { double, double }** %13, align 8 + %15 = bitcast { double, double }* %14 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %16 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %17 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %18 = phi i64 [ 0, %exit__2 ], [ %24, %exiting__3 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %18) + %21 = bitcast i8* %20 to { double, double }** + %22 = load { double, double }*, { double, double }** %21, align 8 + %23 = bitcast { double, double }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %24 = add i64 %18, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + %25 = call i64 @__quantum__rt__array_get_size_1d(%Array* %negLocs) + %26 = sub i64 %25, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + 
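; (annotation) loop over negLocs: each in-range index has its coefficient replaced by ComplexPolar(magnitude, 0.0); an out-of-range index builds an error message and fails +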
%27 = phi i64 [ 0, %exit__3 ], [ %56, %exiting__4 ] + %28 = icmp sle i64 %27, %26 + br i1 %28, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %negLocs, i64 %27) + %30 = bitcast i8* %29 to i64* + %idxNegative = load i64, i64* %30, align 4 + %31 = icmp sge i64 %idxNegative, %0 + br i1 %31, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__4 + %32 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([31 x i8], [31 x i8]* @9, i32 0, i32 0)) + %33 = call %String* @__quantum__rt__int_to_string(i64 %idxNegative) + %34 = call %String* @__quantum__rt__string_concatenate(%String* %32, %String* %33) + call void @__quantum__rt__string_update_reference_count(%String* %32, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %33, i32 -1) + %35 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([8 x i8], [8 x i8]* @10, i32 0, i32 0)) + %36 = call %String* @__quantum__rt__string_concatenate(%String* %34, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + %37 = call %String* @__quantum__rt__int_to_string(i64 %0) + %38 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %37) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %37, i32 -1) + %39 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([29 x i8], [29 x i8]* @11, i32 0, i32 0)) + %40 = call %String* @__quantum__rt__string_concatenate(%String* %38, %String* %39) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + %41 = load %Array*, %Array** %ret, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %negLocs, i32 -1) + %42 = sub i64 %0, 1 + br label %header__5 + +continue__1: ; preds = %body__4 + %43 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %idxNegative) + %44 = bitcast i8* %43 to { double, double }** + %coefficient = load { double, double }*, { double, double }** %44, align 8 + %45 = bitcast { double, double }* %coefficient to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 1) + %46 = load %Array*, %Array** %ret, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %46, i32 -1) + %47 = call %Array* @__quantum__rt__array_copy(%Array* %46, i1 false) + %48 = getelementptr inbounds { double, double }, { double, double }* %coefficient, i32 0, i32 0 + %49 = load double, double* %48, align 8 + %50 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %49, double 0.000000e+00) + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %47, i64 %idxNegative) + %52 = bitcast i8* %51 to { double, double }** + %53 = bitcast { double, double }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %53, i32 1) + %54 = load { double, double }*, { double, double }** %52, align 8 + %55 = bitcast { double, double }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %55, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %55, i32 -1) + store { double, double }* %50, { double, double }** %52, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %47, i32 1) + store %Array* %47, %Array** %ret, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %45, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %46, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %continue__1 + %56 = add i64 %27, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + %57 = load %Array*, %Array** %ret, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %negLocs, i32 -1) + %58 = sub i64 %0, 1 + br label %header__8 + +header__5: ; preds = %exiting__5, %then0__1 + %59 = phi i64 [ 0, %then0__1 ], [ %65, %exiting__5 ] + %60 = icmp sle i64 %59, %42 + br i1 %60, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %59) + %62 = bitcast i8* %61 to { double, double }** + %63 = load { double, double }*, { double, double }** %62, align 8 + %64 = bitcast { double, double }* %63 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %64, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %65 = add i64 %59, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + %66 = call i64 @__quantum__rt__array_get_size_1d(%Array* %41) + %67 = sub i64 %66, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %68 = phi i64 [ 0, %exit__5 ], [ %74, %exiting__6 ] + %69 = icmp sle i64 %68, %67 + br i1 %69, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %41, i64 %68) + %71 = bitcast i8* %70 to { double, double }** + %72 = load { double, double }*, { double, double }** %71, align 8 + %73 = bitcast { double, double }* %72 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %73, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %74 = add i64 %68, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %41, i32 -1) + %75 = sub i64 %66, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %76 = phi i64 [ 0, %exit__6 ], [ %82, %exiting__7 ] + %77 = icmp sle i64 %76, %75 + br i1 %77, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %78 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %41, i64 %76) + %79 = bitcast i8* %78 to { double, double }** + %80 = load { double, double }*, { double, double }** %79, align 8 + %81 = bitcast { double, double }* %80 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %81, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %82 = add i64 %76, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1) + call void @__quantum__rt__fail(%String* %40) + unreachable + +header__8: ; preds = %exiting__8, %exit__4 + %83 = phi i64 [ 0, %exit__4 ], [ %89, %exiting__8 ] + %84 = icmp sle i64 %83, %58 + br i1 %84, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %85 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %83) + %86 = bitcast i8* %85 to { double, double }** + %87 = load { double, double }*, { double, double }** %86, align 8 + %88 = bitcast { double, double }* %87 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %88, i32 
-1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %89 = add i64 %83, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + %90 = call i64 @__quantum__rt__array_get_size_1d(%Array* %57) + %91 = sub i64 %90, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %92 = phi i64 [ 0, %exit__8 ], [ %98, %exiting__9 ] + %93 = icmp sle i64 %92, %91 + br i1 %93, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %94 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %57, i64 %92) + %95 = bitcast i8* %94 to { double, double }** + %96 = load { double, double }*, { double, double }** %95, align 8 + %97 = bitcast { double, double }* %96 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %97, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %98 = add i64 %92, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %57, i32 -1) + ret %Array* %57 +} + +declare void @__quantum__rt__fail(%String*) + +define internal { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %Magnitude, double %Argument) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { double, double }* + %2 = getelementptr inbounds { double, double }, { double, double }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { double, double }, { double, double }* %1, i32 0, i32 1 + store double %Magnitude, double* %2, align 8 + store double %Argument, double* %3, align 8 + ret { double, double }* %1 +} + +define internal { double, double, i64 }* @Microsoft__Quantum__Optimization__LocalUnivariateMinimum__body(%Callable* %fn, { double, double }* %bounds, double %tolerance) { +entry: + %queryAmount = alloca i64, align 8 + %rightProbe = alloca { double, double }*, align 8 + %leftProbe = alloca { double, double }*, align 8 + %interval = alloca { double, double }*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %fn, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %fn, i32 1) + %0 = bitcast { double, double }* %bounds to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + store { double, double }* %bounds, { double, double }** %interval, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 1) + %1 = getelementptr inbounds { double, double }, { double, double }* %bounds, i32 0, i32 0 + %2 = getelementptr inbounds { double, double }, { double, double }* %bounds, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load double, double* %2, align 8 + %5 = call { double, double }* @Microsoft__Quantum__Optimization____QsRef1__NextProbes____body(double %3, double %4) + %6 = getelementptr inbounds { double, double }, { double, double }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { double, double }, { double, double }* %5, i32 0, i32 1 + %8 = load double, double* %6, align 8 + %9 = load double, double* %7, align 8 + %10 = call double @Microsoft__Quantum__Canon___17f71f9fa0da472d90e5bc66ed171ba5_Fst__body(double %8, double %9) + %11 = call { double, double }* @Microsoft__Quantum__Optimization____QsRef1__ProbeValue____body(%Callable* %fn, double %10) + store { double, 
double }* %11, { double, double }** %leftProbe, align 8 + %12 = bitcast { double, double }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + %13 = call { double, double }* @Microsoft__Quantum__Optimization____QsRef1__NextProbes____body(double %3, double %4) + %14 = getelementptr inbounds { double, double }, { double, double }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, double }, { double, double }* %13, i32 0, i32 1 + %16 = load double, double* %14, align 8 + %17 = load double, double* %15, align 8 + %18 = call double @Microsoft__Quantum__Canon___57aa5a97323949708d5d87cf2aa9c443_Snd__body(double %16, double %17) + %19 = call { double, double }* @Microsoft__Quantum__Optimization____QsRef1__ProbeValue____body(%Callable* %fn, double %18) + store { double, double }* %19, { double, double }** %rightProbe, align 8 + %20 = bitcast { double, double }* %19 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 1) + store i64 2, i64* %queryAmount, align 4 + br label %while__1 + +while__1: ; preds = %continue__1, %entry + %21 = load { double, double }*, { double, double }** %interval, align 8 + %22 = getelementptr inbounds { double, double }, { double, double }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, double }, { double, double }* %21, i32 0, i32 1 + %24 = load double, double* %22, align 8 + %25 = load double, double* %23, align 8 + %26 = call double @Microsoft__Quantum__Optimization____QsRef1__Width____body(double %24, double %25) + %27 = fcmp ogt double %26, %tolerance + br i1 %27, label %do__1, label %wend__1 + +do__1: ; preds = %while__1 + %28 = load { double, double }*, { double, double }** %leftProbe, align 8 + %29 = getelementptr inbounds { double, double }, { double, double }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { double, double }, { double, double }* %28, i32 0, i32 1 + %31 = load double, double* %29, align 8 + %32 = load double, double* %30, align 8 + %33 = call double @Microsoft__Quantum__Canon___57aa5a97323949708d5d87cf2aa9c443_Snd__body(double %31, double %32) + %34 = load { double, double }*, { double, double }** %rightProbe, align 8 + %35 = getelementptr inbounds { double, double }, { double, double }* %34, i32 0, i32 0 + %36 = getelementptr inbounds { double, double }, { double, double }* %34, i32 0, i32 1 + %37 = load double, double* %35, align 8 + %38 = load double, double* %36, align 8 + %39 = call double @Microsoft__Quantum__Canon___57aa5a97323949708d5d87cf2aa9c443_Snd__body(double %37, double %38) + %40 = fcmp olt double %33, %39 + br i1 %40, label %then0__1, label %else__1 + +then0__1: ; preds = %do__1 + %41 = call double @Microsoft__Quantum__Canon___17f71f9fa0da472d90e5bc66ed171ba5_Fst__body(double %24, double %25) + %42 = call double @Microsoft__Quantum__Canon___17f71f9fa0da472d90e5bc66ed171ba5_Fst__body(double %37, double %38) + %43 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %44 = bitcast %Tuple* %43 to { double, double }* + %45 = getelementptr inbounds { double, double }, { double, double }* %44, i32 0, i32 0 + %46 = getelementptr inbounds { double, double }, { double, double }* %44, i32 0, i32 1 + store double %41, double* %45, align 8 + store double %42, double* %46, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %43, i32 1) + %47 = bitcast { double, double }* %21 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { double, double }* %44, { double, double }** %interval, align 8 + %48 = bitcast { double, double }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %48, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %48, i32 1) + %49 = bitcast { double, double }* %34 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %49, i32 -1) + store { double, double }* %28, { double, double }** %rightProbe, align 8 + %50 = call { double, double }* @Microsoft__Quantum__Optimization____QsRef1__NextProbes____body(double %41, double %42) + %51 = getelementptr inbounds { double, double }, { double, double }* %50, i32 0, i32 0 + %52 = getelementptr inbounds { double, double }, { double, double }* %50, i32 0, i32 1 + %53 = load double, double* %51, align 8 + %54 = load double, double* %52, align 8 + %55 = call double @Microsoft__Quantum__Canon___17f71f9fa0da472d90e5bc66ed171ba5_Fst__body(double %53, double %54) + %56 = call { double, double }* @Microsoft__Quantum__Optimization____QsRef1__ProbeValue____body(%Callable* %fn, double %55) + %57 = bitcast { double, double }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %48, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %48, i32 -1) + store { double, double }* %56, { double, double }** %leftProbe, align 8 + %58 = bitcast { double, double }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %58, i32 -1) + br label %continue__1 + +else__1: ; preds = %do__1 + %59 = load { double, double }*, { double, double }** %leftProbe, align 8 + %60 = getelementptr inbounds { double, double }, { double, double }* %59, i32 0, i32 0 + %61 = getelementptr inbounds { double, double }, { double, double }* %59, i32 0, i32 1 + %62 = load double, double* %60, align 8 + %63 = load double, double* %61, align 8 + %64 = call double @Microsoft__Quantum__Canon___17f71f9fa0da472d90e5bc66ed171ba5_Fst__body(double %62, double %63) + %65 = load { double, double }*, { double, double }** %interval, align 8 + %66 = getelementptr inbounds { double, double }, { double, double }* %65, i32 0, i32 0 + %67 = getelementptr inbounds { double, double }, { double, double }* %65, i32 0, i32 1 + %68 = load double, double* %66, align 8 + %69 = load double, double* %67, align 8 + %70 = call double @Microsoft__Quantum__Canon___57aa5a97323949708d5d87cf2aa9c443_Snd__body(double %68, double %69) + %71 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %72 = bitcast %Tuple* %71 to { double, double }* + %73 = getelementptr inbounds { double, double }, { double, double }* %72, i32 0, i32 0 + %74 = getelementptr inbounds { double, double }, { double, double }* %72, i32 0, i32 1 + store double %64, double* %73, align 8 + store double %70, double* %74, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %71, i32 1) + %75 = bitcast { double, double }* %65 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %75, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %75, i32 -1) + store { double, double }* %72, { double, double }** 
%interval, align 8 + %76 = load { double, double }*, { double, double }** %rightProbe, align 8 + %77 = bitcast { double, double }* %76 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %77, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %77, i32 1) + %78 = bitcast { double, double }* %59 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %78, i32 -1) + store { double, double }* %76, { double, double }** %leftProbe, align 8 + %79 = call { double, double }* @Microsoft__Quantum__Optimization____QsRef1__NextProbes____body(double %64, double %70) + %80 = getelementptr inbounds { double, double }, { double, double }* %79, i32 0, i32 0 + %81 = getelementptr inbounds { double, double }, { double, double }* %79, i32 0, i32 1 + %82 = load double, double* %80, align 8 + %83 = load double, double* %81, align 8 + %84 = call double @Microsoft__Quantum__Canon___57aa5a97323949708d5d87cf2aa9c443_Snd__body(double %82, double %83) + %85 = call { double, double }* @Microsoft__Quantum__Optimization____QsRef1__ProbeValue____body(%Callable* %fn, double %84) + %86 = bitcast { double, double }* %85 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %86, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %77, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %77, i32 -1) + store { double, double }* %85, { double, double }** %rightProbe, align 8 + %87 = bitcast { double, double }* %79 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %87, i32 -1) + br label %continue__1 + +continue__1: ; preds = %else__1, %then0__1 + %88 = load i64, i64* %queryAmount, align 4 + %89 = add i64 %88, 1 + store i64 %89, i64* %queryAmount, align 4 + br label %while__1 + +wend__1: ; preds = %while__1 + %90 = load { double, double }*, { double, double }** %leftProbe, align 8 + %91 = getelementptr inbounds { double, double }, { double, double }* %90, i32 0, i32 0 + %92 = getelementptr inbounds { double, double }, { double, double }* %90, i32 0, i32 1 + %93 = load double, double* %91, align 8 + %94 = load double, double* %92, align 8 + %95 = call double @Microsoft__Quantum__Canon___57aa5a97323949708d5d87cf2aa9c443_Snd__body(double %93, double %94) + %96 = load { double, double }*, { double, double }** %rightProbe, align 8 + %97 = getelementptr inbounds { double, double }, { double, double }* %96, i32 0, i32 0 + %98 = getelementptr inbounds { double, double }, { double, double }* %96, i32 0, i32 1 + %99 = load double, double* %97, align 8 + %100 = load double, double* %98, align 8 + %101 = call double @Microsoft__Quantum__Canon___57aa5a97323949708d5d87cf2aa9c443_Snd__body(double %99, double %100) + %102 = fcmp olt double %95, %101 + br i1 %102, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %wend__1 + %103 = bitcast { double, double }* %90 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %103, i32 1) + br label %condContinue__1 + +condFalse__1: ; preds = %wend__1 + %104 = bitcast { double, double }* %96 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %104, i32 1) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %result = phi { double, double }* [ %90, %condTrue__1 ], [ %96, %condFalse__1 ] + %105 = bitcast { double, double }* %result to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* 
%105, i32 1) + %106 = getelementptr inbounds { double, double }, { double, double }* %result, i32 0, i32 0 + %107 = getelementptr inbounds { double, double }, { double, double }* %result, i32 0, i32 1 + %108 = load double, double* %106, align 8 + %109 = load double, double* %107, align 8 + %110 = call double @Microsoft__Quantum__Canon___17f71f9fa0da472d90e5bc66ed171ba5_Fst__body(double %108, double %109) + %111 = call double @Microsoft__Quantum__Canon___57aa5a97323949708d5d87cf2aa9c443_Snd__body(double %108, double %109) + %112 = load i64, i64* %queryAmount, align 4 + %113 = call { double, double, i64 }* @Microsoft__Quantum__Optimization__UnivariateOptimizationResult__body(double %110, double %111, i64 %112) + %114 = load { double, double }*, { double, double }** %interval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %fn, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %fn, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + %115 = bitcast { double, double }* %114 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %115, i32 -1) + %116 = bitcast { double, double }* %90 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %116, i32 -1) + %117 = bitcast { double, double }* %96 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %117, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %105, i32 -1) + %118 = bitcast { double, double }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %118, i32 -1) + %119 = bitcast { double, double }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %119, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %105, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %115, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %116, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %117, i32 -1) + ret { double, double, i64 }* %113 +} + +define internal void @Lifted__PartialApplication__9__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { double }* + %6 = getelementptr inbounds { double }, { double }* %5, i32 0, i32 0 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, double }* getelementptr ({ %Array*, %Array*, double }, { %Array*, %Array*, double }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array*, double }* + %10 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %9, i32 0, i32 2 + store %Array* %2, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + store double %7, double* %12, align 8 + %13 = getelementptr inbounds { %Callable*, %Array*, %Array* 
}, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__MisclassificationRate____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array*, double }* + %1 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %1, align 8 + %5 = load %Array*, %Array** %2, align 8 + %6 = load double, double* %3, align 8 + %7 = call double @Microsoft__Quantum__MachineLearning____QsRef0__MisclassificationRate____body(%Array* %4, %Array* %5, double %6) + %8 = bitcast %Tuple* %result-tuple to { double }* + %9 = getelementptr inbounds { double }, { double }* %8, i32 0, i32 0 + store double %7, double* %9, align 8 + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___2a89d4e05ab447e5a736535efa7cd8e7_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to { double, i64 }** + %5 = load { double, i64 }*, { double, i64 }** %4, align 8 + %6 = bitcast { double, i64 }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %8 = icmp eq i64 %length, 0 + br i1 %8, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %9 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %10 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %12 = bitcast i8* %11 to { double, i64 }** + %13 = load { double, i64 }*, { double, i64 }** %12, align 8 + %14 = bitcast { double, i64 }* %13 to %Tuple* + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %14, %Tuple* %15) + %16 = bitcast %Tuple* %15 to { double }* + %17 = getelementptr inbounds { double }, { double }* %16, i32 0, i32 0 + %first = load double, double* %17, align 8 + %18 = call %Array* 
@__quantum__rt__array_create_1d(i32 8, i64 %length) + %19 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %20 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__2 ] + %21 = icmp sle i64 %20, %10 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %20) + %23 = bitcast i8* %22 to { double, i64 }** + %24 = load { double, i64 }*, { double, i64 }** %23, align 8 + %25 = bitcast { double, i64 }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %9 + +header__3: ; preds = %exiting__3, %continue__1 + %27 = phi i64 [ 0, %continue__1 ], [ %31, %exiting__3 ] + %28 = icmp sle i64 %27, %19 + br i1 %28, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %27) + %30 = bitcast i8* %29 to double* + store double %first, double* %30, align 8 + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %31 = add i64 %27, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %18, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + %32 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %idx = phi i64 [ 1, %exit__3 ], [ %46, %exiting__4 ] + %33 = icmp sle i64 %idx, %32 + br i1 %33, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %34 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + %35 = call %Array* @__quantum__rt__array_copy(%Array* %34, i1 false) + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %37 = bitcast i8* %36 to { double, i64 }** + %38 = load { double, i64 }*, { double, i64 }** %37, align 8 + %39 = bitcast { double, i64 }* %38 to %Tuple* + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %39, %Tuple* %40) + %41 = bitcast %Tuple* %40 to { double }* + %42 = getelementptr inbounds { double }, { double }* %41, i32 0, i32 0 + %43 = load double, double* %42, align 8 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %35, i64 %idx) + %45 = bitcast i8* %44 to double* + store double %43, double* %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %35, i32 1) + store %Array* %35, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %46 = add i64 %idx, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + %47 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %48 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %49 = phi i64 [ 0, %exit__4 ], [ %55, %exiting__5 ] + %50 = icmp sle i64 %49, %48 + br i1 %50, label %body__5, label 
%exit__5 + +body__5: ; preds = %header__5 + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %49) + %52 = bitcast i8* %51 to { double, i64 }** + %53 = load { double, i64 }*, { double, i64 }** %52, align 8 + %54 = bitcast { double, i64 }* %53 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %54, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %55 = add i64 %49, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + ret %Array* %47 +} + +define internal void @Microsoft__Quantum__Canon___72deeddd84a741deba305c641ccbb494_Fst__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64 }* + %1 = getelementptr inbounds { double, i64 }, { double, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, i64 }, { double, i64 }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load i64, i64* %2, align 4 + %5 = call double @Microsoft__Quantum__Canon___72deeddd84a741deba305c641ccbb494_Fst__body(double %3, i64 %4) + %6 = bitcast %Tuple* %result-tuple to { double }* + %7 = getelementptr inbounds { double }, { double }* %6, i32 0, i32 0 + store double %5, double* %7, align 8 + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___1cabe7eb60764be98bab0923a4277ae6_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to { double, i64 }** + %5 = load { double, i64 }*, { double, i64 }** %4, align 8 + %6 = bitcast { double, i64 }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %8 = icmp eq i64 %length, 0 + br i1 %8, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %9 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %10 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %12 = bitcast i8* %11 to { double, i64 }** + %13 = load { double, i64 }*, { double, i64 }** %12, align 8 + %14 = bitcast { double, i64 }* %13 to %Tuple* + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %14, %Tuple* %15) 
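+  ; The mapper has just been invoked on element 0; its i64 result is read back
+  ; below and seeds every slot of the newly created output array, after which
+  ; the copy-on-write loop at header__4 overwrites indices 1..length-1 with
+  ; the remaining mapped values.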
+ %16 = bitcast %Tuple* %15 to { i64 }* + %17 = getelementptr inbounds { i64 }, { i64 }* %16, i32 0, i32 0 + %first = load i64, i64* %17, align 4 + %18 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %19 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %20 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__2 ] + %21 = icmp sle i64 %20, %10 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %20) + %23 = bitcast i8* %22 to { double, i64 }** + %24 = load { double, i64 }*, { double, i64 }** %23, align 8 + %25 = bitcast { double, i64 }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %9 + +header__3: ; preds = %exiting__3, %continue__1 + %27 = phi i64 [ 0, %continue__1 ], [ %31, %exiting__3 ] + %28 = icmp sle i64 %27, %19 + br i1 %28, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %27) + %30 = bitcast i8* %29 to i64* + store i64 %first, i64* %30, align 4 + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %31 = add i64 %27, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %18, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + %32 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %idx = phi i64 [ 1, %exit__3 ], [ %46, %exiting__4 ] + %33 = icmp sle i64 %idx, %32 + br i1 %33, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %34 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + %35 = call %Array* @__quantum__rt__array_copy(%Array* %34, i1 false) + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %37 = bitcast i8* %36 to { double, i64 }** + %38 = load { double, i64 }*, { double, i64 }** %37, align 8 + %39 = bitcast { double, i64 }* %38 to %Tuple* + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %39, %Tuple* %40) + %41 = bitcast %Tuple* %40 to { i64 }* + %42 = getelementptr inbounds { i64 }, { i64 }* %41, i32 0, i32 0 + %43 = load i64, i64* %42, align 4 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %35, i64 %idx) + %45 = bitcast i8* %44 to i64* + store i64 %43, i64* %45, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %35, i32 1) + store %Array* %35, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %46 = add i64 %idx, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + %47 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %48 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, 
%exit__4 + %49 = phi i64 [ 0, %exit__4 ], [ %55, %exiting__5 ] + %50 = icmp sle i64 %49, %48 + br i1 %50, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %49) + %52 = bitcast i8* %51 to { double, i64 }** + %53 = load { double, i64 }*, { double, i64 }** %52, align 8 + %54 = bitcast { double, i64 }* %53 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %54, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %55 = add i64 %49, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + ret %Array* %47 +} + +define internal void @Microsoft__Quantum__Canon___ce11ecc402da481dad234c6ec2301ce8_Snd__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64 }* + %1 = getelementptr inbounds { double, i64 }, { double, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, i64 }, { double, i64 }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load i64, i64* %2, align 4 + %5 = call i64 @Microsoft__Quantum__Canon___ce11ecc402da481dad234c6ec2301ce8_Snd__body(double %3, i64 %4) + %6 = bitcast %Tuple* %result-tuple to { i64 }* + %7 = getelementptr inbounds { i64 }, { i64 }* %6, i32 0, i32 0 + store i64 %5, i64* %7, align 4 + ret void +} + +define internal void @MemoryManagement__8__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__8__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { 
%Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal double @Microsoft__Quantum__Canon___72deeddd84a741deba305c641ccbb494_Fst__body(double %0, i64 %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64 }* getelementptr ({ double, i64 }, { double, i64 }* null, i32 1) to i64)) + %pair = bitcast %Tuple* %2 to { double, i64 }* + %3 = getelementptr inbounds { double, i64 }, { double, i64 }* %pair, i32 0, i32 0 + %4 = getelementptr inbounds { double, i64 }, { double, i64 }* %pair, i32 0, i32 1 + store double %0, double* %3, align 8 + store i64 %1, i64* %4, align 4 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret double %0 +} + +define internal i64 @Microsoft__Quantum__Canon___ce11ecc402da481dad234c6ec2301ce8_Snd__body(double %0, i64 %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64 }* getelementptr ({ double, i64 }, { double, i64 }* null, i32 1) to i64)) + %pair = bitcast %Tuple* %2 to { double, i64 }* + %3 = getelementptr inbounds { double, i64 }, { double, i64 }* %pair, i32 0, i32 0 + %4 = getelementptr inbounds { double, i64 }, { double, i64 }* %pair, i32 0, i32 1 + store double %0, double* %3, align 8 + store i64 %1, i64* %4, align 4 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret i64 %1 +} + +define internal i1 @Microsoft__Quantum__Arrays___cddb1db8090d4b2580514eb678e65fbd_IsEmpty__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = icmp eq i64 %0, 0 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret i1 %1 +} + +define internal %Array* @Microsoft__Quantum__Arrays___93b43ff3c247411b9de017f51b2344c9_Subarray__body(%Array* %indices, %Array* %array) { +entry: + %sliced = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %nSliced = call i64 @__quantum__rt__array_get_size_1d(%Array* %indices) + %0 = icmp eq i64 %nSliced, 0 + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %1 + +continue__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %3 = bitcast i8* %2 to i64* + %4 = load i64, i64* %3, align 4 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %4) + %6 = bitcast i8* %5 to %Qubit** + %7 = load %Qubit*, %Qubit** %6, align 8 + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nSliced) + %9 = sub i64 %nSliced, 1 + br label %header__1 + +header__1: ; preds 
= %exiting__1, %continue__1 + %10 = phi i64 [ 0, %continue__1 ], [ %14, %exiting__1 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 %10) + %13 = bitcast i8* %12 to %Qubit** + store %Qubit* %7, %Qubit** %13, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %14 = add i64 %10, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %8, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %15 = sub i64 %nSliced, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idx = phi i64 [ 1, %exit__1 ], [ %27, %exiting__2 ] + %16 = icmp sle i64 %idx, %15 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = load %Array*, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %18 = call %Array* @__quantum__rt__array_copy(%Array* %17, i1 false) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 %idx) + %20 = bitcast i8* %19 to i64* + %21 = load i64, i64* %20, align 4 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %21) + %23 = bitcast i8* %22 to %Qubit** + %24 = load %Qubit*, %Qubit** %23, align 8 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %idx) + %26 = bitcast i8* %25 to %Qubit** + store %Qubit* %24, %Qubit** %26, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + store %Array* %18, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %idx, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %28 = load %Array*, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 -1) + ret %Array* %28 +} + +define internal %Array* @Microsoft__Quantum__Arrays___0e6bc4a124064ccaaf317888c577a89b_Enumerated__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Arrays___6ed5375d64984881b234f01e25bc55b9___QsRef1__Identity____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___27b80d3c0afd4dd68f55d127c5cdfce5_MappedByIndex__body(%Callable* %0, %Array* %array) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + ret %Array* %1 +} + +define internal i64 @Microsoft__Quantum__Math__Round__body(double %value) { +entry: + %0 = call { i64, double, i1 }* @Microsoft__Quantum__Math____QsRef2__ExtendedTruncation____body(double %value) + %1 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %0, i32 0, i32 0 + %truncated = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %0, i32 0, i32 1 + %remainder = load double, double* %2, align 8 + %3 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* 
%0, i32 0, i32 2 + %isPositive = load i1, i1* %3, align 1 + %4 = call double @Microsoft__Quantum__Math__AbsD__body(double %remainder) + %5 = fcmp ole double %4, 1.000000e-15 + br i1 %5, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %6 = bitcast { i64, double, i1 }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret i64 %truncated + +else__1: ; preds = %entry + %abs = call double @Microsoft__Quantum__Math__AbsD__body(double %remainder) + %7 = fcmp ole double %abs, 5.000000e-01 + br i1 %7, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + br i1 %isPositive, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condTrue__2 + %8 = phi i64 [ 1, %condTrue__2 ], [ -1, %condFalse__2 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %9 = phi i64 [ 0, %condTrue__1 ], [ %8, %condContinue__2 ] + %10 = add i64 %truncated, %9 + %11 = bitcast { i64, double, i1 }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret i64 %10 + +continue__1: ; No predecessors! + unreachable +} + +define internal { i64, %Callable* }* @Microsoft__Quantum__MachineLearning__StateGenerator__body(i64 %NQubits, %Callable* %Prepare) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %Prepare, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %Prepare, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Callable* }* getelementptr ({ i64, %Callable* }, { i64, %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { i64, %Callable* }* + %2 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %1, i32 0, i32 1 + store i64 %NQubits, i64* %2, align 4 + store %Callable* %Prepare, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %Prepare, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %Prepare, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %Prepare, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %Prepare, i32 -1) + ret { i64, %Callable* }* %1 +} + +define internal void @Lifted__PartialApplication__10__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, { %Array* }* }* + %5 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array* }, { 
%Callable*, %Array* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, { %Array* }* }* + %5 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Callable* @__quantum__rt__callable_copy(%Callable* %9, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %10) + call void @__quantum__rt__callable_invoke(%Callable* %10, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, { %Array* }* }* + %10 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store { %Array* }* %4, { %Array* }** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, { %Array* }* }* }* getelementptr ({ %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, { %Array* }* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { 
%Array*, { %Array*, { %Array* }* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, { %Array* }* }* %9, { %Array*, { %Array* }* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__10__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, { %Array* }* }* + %10 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store { %Array* }* %4, { %Array* }** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, { %Array* }* }* }* getelementptr ({ %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, { %Array* }* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, { %Array* }* }* %9, { %Array*, { %Array* }* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* 
%18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__ApplyTwoQubitCase____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + call void @Microsoft__Quantum__MachineLearning____QsRef0__ApplyTwoQubitCase____body(%Array* %3, { %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__ApplyTwoQubitCase____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + call void @Microsoft__Quantum__MachineLearning____QsRef0__ApplyTwoQubitCase____adj(%Array* %3, { %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__ApplyTwoQubitCase____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, { %Array* }* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, { %Array* }* }*, { %Array*, { %Array* }* }** %2, align 8 + call void @Microsoft__Quantum__MachineLearning____QsRef0__ApplyTwoQubitCase____ctl(%Array* %3, { %Array*, { %Array* }* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__ApplyTwoQubitCase____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, { %Array* }* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, { %Array* }* }*, { %Array*, { %Array* }* }** %2, align 8 + call void @Microsoft__Quantum__MachineLearning____QsRef0__ApplyTwoQubitCase____ctladj(%Array* %3, { %Array*, { %Array* }* }* %4) + ret void +} + +define internal void @MemoryManagement__9__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 
0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__9__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal double @Microsoft__Quantum__Math__Lg__body(double %input) { +entry: + %0 = call double @__quantum__qis__log__body(double %input) + %1 = call double @Microsoft__Quantum__Math__LogOf2__body() + %2 = fdiv double %0, %1 + ret double %2 +} + +define internal %Callable* @Microsoft__Quantum__Canon___1809700b885a46aeb0473713f7c55f2f_BoundCA__body(%Array* %operations) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___64e06f1afcfe4ab9be6fe951b3a50440___QsRef1__ApplyBoundCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %9 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %15, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %10) + %13 = bitcast i8* %12 to %Callable** + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 
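+  ; Loop-counter increment for header__2. The +1 reference counts taken in
+  ; body__2 keep each callable alive once the operations array is captured by
+  ; the partial application constructed after exit__2.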
+ %15 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %operations, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Callable*, %Array* }* + %18 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 1 + store %Callable* %8, %Callable** %18, align 8 + store %Array* %operations, %Array** %19, align 8 + %20 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__29__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__17__FunctionTable, %Tuple* %16) + %21 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %22 = phi i64 [ 0, %exit__2 ], [ %27, %exiting__3 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %22) + %25 = bitcast i8* %24 to %Callable** + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %27 = add i64 %22, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + ret %Callable* %20 +} + +define internal %Callable* @Microsoft__Quantum__Preparation___CompileApproximateArbitraryStatePreparation__body(double %tolerance, %Array* %coefficients, i64 %nQubits) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = trunc i64 %nQubits to i32 + %10 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %9) + %11 = fptosi double %10 to i64 + %12 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double 0.000000e+00, double 0.000000e+00) + %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___591aa3c3d09b40fd80ccf1fad0bc50fd_Padded__body(i64 %11, { double, double }* %12, %Array* %coefficients) + %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded) + %14 = sub i64 %13, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 %15) + %18 = bitcast i8* %17 to { double, double }** + %19 = load { double, double }*, { double, double }** %18, align 8 + %20 = bitcast { double, double }* %19 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1) + %22 = icmp sgt i64 %nQubits, 1 + %23 = sub i64 %nQubits, 1 + %24 = insertvalue %Range { i64 1, i64 1, i64 0 }, i64 %23, 2 + %rngControl = select i1 %22, %Range %24, %Range { i64 1, i64 1, i64 0 } + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { %Range, i64 }* + %27 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %26, i32 0, i32 0 + %28 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %26, i32 0, i32 1 + store %Range %rngControl, %Range* %27, align 4 + store i64 0, i64* %28, align 4 + %plan = call %Array* @Microsoft__Quantum__Preparation____QsRef1__ApproximatelyUnprepareArbitraryStatePlan____body(double %tolerance, %Array* %coefficientsPadded, { %Range, i64 }* %26) + %29 = call i64 @__quantum__rt__array_get_size_1d(%Array* %plan) + %30 = sub i64 %29, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %31 = phi i64 [ 0, %exit__2 ], [ %36, %exiting__3 ] + %32 = icmp sle i64 %31, %30 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %plan, i64 %31) + %34 = bitcast i8* %33 to %Callable** + %35 = load %Callable*, %Callable** %34, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %35, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %35, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %36 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %plan, i32 1) + %unprepare = call %Callable* @Microsoft__Quantum__Canon___7af5478fdcef46609b03bed279a41c2b_BoundCA__body(%Array* %plan) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unprepare, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unprepare, i32 1) + %37 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef1__ApplyToLittleEndian____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %38 = call %Callable* @__quantum__rt__callable_copy(%Callable* %unprepare, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %38, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %38) + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable* }, { %Callable*, %Callable* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Callable*, %Callable* }* + %41 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %40, i32 0, i32 0 + %42 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %40, i32 0, i32 1 + store %Callable* %37, %Callable** %41, align 8 + store %Callable* %38, %Callable** %42, align 8 + %43 = call 
%Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__35__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__22__FunctionTable, %Tuple* %39) + %44 = sub i64 %0, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %45 = phi i64 [ 0, %exit__3 ], [ %51, %exiting__4 ] + %46 = icmp sle i64 %45, %44 + br i1 %46, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %45) + %48 = bitcast i8* %47 to { double, double }** + %49 = load { double, double }*, { double, double }** %48, align 8 + %50 = bitcast { double, double }* %49 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %50, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %51 = add i64 %45, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + %52 = sub i64 %13, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %53 = phi i64 [ 0, %exit__4 ], [ %59, %exiting__5 ] + %54 = icmp sle i64 %53, %52 + br i1 %54, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 %53) + %56 = bitcast i8* %55 to { double, double }** + %57 = load { double, double }*, { double, double }** %56, align 8 + %58 = bitcast { double, double }* %57 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %58, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %59 = add i64 %53, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1) + %60 = sub i64 %29, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %61 = phi i64 [ 0, %exit__5 ], [ %66, %exiting__6 ] + %62 = icmp sle i64 %61, %60 + br i1 %62, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %plan, i64 %61) + %64 = bitcast i8* %63 to %Callable** + %65 = load %Callable*, %Callable** %64, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %65, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %65, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %66 = add i64 %61, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %plan, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %unprepare, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %unprepare, i32 -1) + %67 = bitcast { double, double }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %67, i32 -1) + %68 = sub i64 %13, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %69 = phi i64 [ 0, %exit__6 ], [ %75, %exiting__7 ] + %70 = icmp sle i64 %69, %68 + br i1 %70, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %71 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 %69) + %72 = bitcast i8* %71 to { double, double }** + %73 = load { double, double }*, { double, double }** %72, align 8 + %74 = bitcast { double, double }* %73 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %74, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = 
%body__7 + %75 = add i64 %69, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + %76 = sub i64 %29, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %77 = phi i64 [ 0, %exit__7 ], [ %82, %exiting__8 ] + %78 = icmp sle i64 %77, %76 + br i1 %78, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %plan, i64 %77) + %80 = bitcast i8* %79 to %Callable** + %81 = load %Callable*, %Callable** %80, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %81, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %81, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %82 = add i64 %77, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %plan, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %unprepare, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %unprepare, i32 -1) + ret %Callable* %43 +} + +define internal void @Lifted__PartialApplication__11__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, { %Array* }* }* getelementptr ({ %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Array*, %Array*, { %Array* }* }* + %7 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %6, i32 0, i32 2 + store %Array* %2, %Array** %7, align 8 + store %Array* %4, %Array** %8, align 8 + %10 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %10, { %Array* }** %9, align 8 + %11 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %12 = load %Callable*, %Callable** %11, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %5, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, { %Array* }* }* getelementptr ({ %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Array*, %Array*, { %Array* }* }* + %7 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %6, i32 0, i32 2 + store %Array* %2, %Array** %7, align 8 + store %Array* %4, %Array** %8, align 8 + %10 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %10, { %Array* }** %9, align 8 + %11 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %12 = load %Callable*, %Callable** %11, align 8 + %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %12, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %13) + call void @__quantum__rt__callable_invoke(%Callable* %13, %Tuple* %5, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, { %Array* }* }* getelementptr ({ %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Array*, { %Array* }* }* + %12 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %11, i32 0, i32 2 + store %Array* %7, %Array** %12, align 8 + store %Array* %9, %Array** %13, align 8 + store { %Array* }* %4, { %Array* }** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array*, { %Array* }* }* }* getelementptr ({ %Array*, { %Array*, %Array*, { %Array* }* }* }, { %Array*, { %Array*, %Array*, { %Array* }* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { %Array*, %Array*, { %Array* }* }* }* + %17 = 
getelementptr inbounds { %Array*, { %Array*, %Array*, { %Array* }* }* }, { %Array*, { %Array*, %Array*, { %Array* }* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { %Array*, %Array*, { %Array* }* }* }, { %Array*, { %Array*, %Array*, { %Array* }* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { %Array*, %Array*, { %Array* }* }* %11, { %Array*, %Array*, { %Array* }* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__11__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, { %Array* }* }* getelementptr ({ %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Array*, %Array*, { %Array* }* }* + %12 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %11, i32 0, i32 2 + store %Array* %7, %Array** %12, align 8 + store %Array* %9, %Array** %13, align 8 + store { %Array* }* %4, { %Array* }** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array*, { %Array* }* }* }* getelementptr ({ %Array*, { %Array*, %Array*, { %Array* }* }* }, { %Array*, { %Array*, %Array*, { %Array* }* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { %Array*, %Array*, { %Array* }* }* }* + %17 = getelementptr inbounds { %Array*, { %Array*, %Array*, { %Array* }* }* }, { %Array*, { %Array*, %Array*, { %Array* }* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { %Array*, %Array*, { %Array* }* }* }, { %Array*, { %Array*, %Array*, { %Array* }* }* }* %16, 
i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { %Array*, %Array*, { %Array* }* }* %11, { %Array*, %Array*, { %Array* }* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__ReflectAboutNegativeCoefficients____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %1, align 8 + %5 = load %Array*, %Array** %2, align 8 + %6 = load { %Array* }*, { %Array* }** %3, align 8 + call void @Microsoft__Quantum__MachineLearning____QsRef0__ReflectAboutNegativeCoefficients____body(%Array* %4, %Array* %5, { %Array* }* %6) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__ReflectAboutNegativeCoefficients____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Array*, %Array*, { %Array* }* }, { %Array*, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %1, align 8 + %5 = load %Array*, %Array** %2, align 8 + %6 = load { %Array* }*, { %Array* }** %3, align 8 + call void @Microsoft__Quantum__MachineLearning____QsRef0__ReflectAboutNegativeCoefficients____adj(%Array* %4, %Array* %5, { %Array* }* %6) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__ReflectAboutNegativeCoefficients____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array*, { %Array* }* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array*, { %Array* }* }* }, { %Array*, { %Array*, %Array*, { %Array* }* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array*, { %Array* }* }* }, { %Array*, { %Array*, %Array*, { %Array* }* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array*, { %Array* 
}* }*, { %Array*, %Array*, { %Array* }* }** %2, align 8 + call void @Microsoft__Quantum__MachineLearning____QsRef0__ReflectAboutNegativeCoefficients____ctl(%Array* %3, { %Array*, %Array*, { %Array* }* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__ReflectAboutNegativeCoefficients____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array*, { %Array* }* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array*, { %Array* }* }* }, { %Array*, { %Array*, %Array*, { %Array* }* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array*, { %Array* }* }* }, { %Array*, { %Array*, %Array*, { %Array* }* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array*, { %Array* }* }*, { %Array*, %Array*, { %Array* }* }** %2, align 8 + call void @Microsoft__Quantum__MachineLearning____QsRef0__ReflectAboutNegativeCoefficients____ctladj(%Array* %3, { %Array*, %Array*, { %Array* }* }* %4) + ret void +} + +define internal void @MemoryManagement__10__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %6 = load %Array*, %Array** %5, align 8 + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %9 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %9) + %12 = bitcast i8* %11 to { double, double }** + %13 = load { double, double }*, { double, double }** %12, align 8 + %14 = bitcast { double, double }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %9, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__10__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + 
%3 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Array*, %Array* }, { %Callable*, %Array*, %Array* }* %0, i32 0, i32 2 + %6 = load %Array*, %Array** %5, align 8 + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %9 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %9) + %12 = bitcast i8* %11 to { double, double }** + %13 = load { double, double }*, { double, double }** %12, align 8 + %14 = bitcast { double, double }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %9, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal { double, double, i64, i64, i64, i64, double, i64, %Callable* }* @Microsoft__Quantum__MachineLearning__TrainingOptions__body(double %LearningRate, double %Tolerance, i64 %MinibatchSize, i64 %NMeasurements, i64 %MaxEpochs, i64 %MaxStalls, double %StochasticRescaleFactor, i64 %ScoringPeriod, %Callable* %VerboseMessage) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %VerboseMessage, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %VerboseMessage, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double, i64, i64, i64, i64, double, i64, %Callable* }* getelementptr ({ double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { double, double, i64, i64, i64, i64, double, i64, %Callable* }* + %2 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %1, i32 0, i32 1 + %4 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %1, i32 0, i32 2 + %5 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %1, i32 0, i32 3 + %6 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %1, i32 0, i32 4 + %7 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %1, i32 0, i32 5 + %8 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %1, i32 0, i32 6 + %9 = getelementptr inbounds { 
double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %1, i32 0, i32 7 + %10 = getelementptr inbounds { double, double, i64, i64, i64, i64, double, i64, %Callable* }, { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %1, i32 0, i32 8 + store double %LearningRate, double* %2, align 8 + store double %Tolerance, double* %3, align 8 + store i64 %MinibatchSize, i64* %4, align 4 + store i64 %NMeasurements, i64* %5, align 4 + store i64 %MaxEpochs, i64* %6, align 4 + store i64 %MaxStalls, i64* %7, align 4 + store double %StochasticRescaleFactor, double* %8, align 8 + store i64 %ScoringPeriod, i64* %9, align 4 + store %Callable* %VerboseMessage, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %VerboseMessage, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %VerboseMessage, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %VerboseMessage, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %VerboseMessage, i32 -1) + ret { double, double, i64, i64, i64, i64, double, i64, %Callable* }* %1 +} + +define internal void @Microsoft__Quantum__Canon___9e001bb7f66049e0a843d161f2c1deac_Ignore__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %String* }* + %1 = getelementptr inbounds { %String* }, { %String* }* %0, i32 0, i32 0 + %2 = load %String*, %String** %1, align 8 + call void @Microsoft__Quantum__Canon___9e001bb7f66049e0a843d161f2c1deac_Ignore__body(%String* %2) + ret void +} + +define internal void @Microsoft__Quantum__Canon___9e001bb7f66049e0a843d161f2c1deac_Ignore__body(%String* %value) { +entry: + ret void +} + +define internal i1 @Microsoft__Quantum__Arrays___b8502d12d9d54d60a228b6d21de14ed7_IsEmpty__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { { i64, %Array* }*, i2, i64 }** + %6 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %5, align 8 + %7 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %6, i32 0, i32 0 + %8 = load { i64, %Array* }*, { i64, %Array* }** %7, align 8 + %9 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %8, i32 0, i32 1 + %10 = load %Array*, %Array** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %11 = bitcast { i64, %Array* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %11, i32 1) + %12 = bitcast { { i64, %Array* }*, i2, i64 }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %14 = icmp eq i64 %0, 0 + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label 
%exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %16) + %19 = bitcast i8* %18 to { { i64, %Array* }*, i2, i64 }** + %20 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %19, align 8 + %21 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %20, i32 0, i32 0 + %22 = load { i64, %Array* }*, { i64, %Array* }** %21, align 8 + %23 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %22, i32 0, i32 1 + %24 = load %Array*, %Array** %23, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %24, i32 -1) + %25 = bitcast { i64, %Array* }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + %26 = bitcast { { i64, %Array* }*, i2, i64 }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret i1 %14 +} + +define internal %Array* @Microsoft__Quantum__Arrays___db69bea2cd3249c5b832b64e75b5d986_ForEach__body(%Callable* %action, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to %Array** + %5 = load %Array*, %Array** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %7 = icmp eq i64 %length, 0 + br i1 %7, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 -1) + %9 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %11 = bitcast i8* %10 to %Array** + %12 = load %Array*, %Array** %11, align 8 + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Array* }* + %15 = getelementptr inbounds { %Array* }, { %Array* }* %14, i32 0, i32 0 + store %Array* %12, %Array** %15, align 8 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %action, %Tuple* %13, %Tuple* %16) + %17 = bitcast %Tuple* %16 to { double }* + %18 = getelementptr inbounds { double }, { double }* %17, i32 0, i32 0 + %first = load double, double* %18, align 8 + %19 = call %Array* 
@__quantum__rt__array_create_1d(i32 8, i64 %length) + %20 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %21 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__2 ] + %22 = icmp sle i64 %21, %9 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %21) + %24 = bitcast i8* %23 to %Array** + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %8 + +header__3: ; preds = %exiting__3, %continue__1 + %27 = phi i64 [ 0, %continue__1 ], [ %31, %exiting__3 ] + %28 = icmp sle i64 %27, %20 + br i1 %28, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 %27) + %30 = bitcast i8* %29 to double* + store double %first, double* %30, align 8 + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %31 = add i64 %27, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %19, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 1) + %32 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %idx = phi i64 [ 1, %exit__3 ], [ %49, %exiting__4 ] + %33 = icmp sle i64 %idx, %32 + br i1 %33, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %34 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + %35 = call %Array* @__quantum__rt__array_copy(%Array* %34, i1 false) + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %37 = bitcast i8* %36 to %Array** + %38 = load %Array*, %Array** %37, align 8 + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Array* }* + %41 = getelementptr inbounds { %Array* }, { %Array* }* %40, i32 0, i32 0 + store %Array* %38, %Array** %41, align 8 + %42 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %action, %Tuple* %39, %Tuple* %42) + %43 = bitcast %Tuple* %42 to { double }* + %44 = getelementptr inbounds { double }, { double }* %43, i32 0, i32 0 + %45 = load double, double* %44, align 8 + %46 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %35, i64 %idx) + %47 = bitcast i8* %46 to double* + %48 = load double, double* %47, align 8 + store double %45, double* %47, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %35, i32 1) + store %Array* %35, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %39, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %42, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %49 = add i64 %idx, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + %50 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %action, i32 -1) + %51 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %52 = phi i64 [ 0, %exit__4 ], [ %57, %exiting__5 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %52) + %55 = bitcast i8* %54 to %Array** + %56 = load %Array*, %Array** %55, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %56, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %57 = add i64 %52, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + ret %Array* %50 +} + +define internal void @Lifted__PartialApplication__12__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { %Array*, %Array*, double }*, i64 }* + %1 = getelementptr inbounds { %Callable*, double, { %Array*, %Array*, double }*, i64 }, { %Callable*, double, { %Array*, %Array*, double }*, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, { %Array*, %Array*, double }*, i64 }, { %Callable*, double, { %Array*, %Array*, double }*, i64 }* %0, i32 0, i32 2 + %4 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, { %Array*, %Array*, double }*, i64 }, { %Callable*, double, { %Array*, %Array*, double }*, i64 }* %0, i32 0, i32 3 + %9 = load i64, i64* %8, align 4 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, { %Array*, %Array*, double }*, %Array*, i64 }* getelementptr ({ double, { %Array*, %Array*, double }*, %Array*, i64 }, { double, { %Array*, %Array*, double }*, %Array*, i64 }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, { %Array*, %Array*, double }*, %Array*, i64 }* + %12 = getelementptr inbounds { double, { %Array*, %Array*, double }*, %Array*, i64 }, { double, { %Array*, %Array*, double }*, %Array*, i64 }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, { %Array*, %Array*, double }*, %Array*, i64 }, { double, { %Array*, %Array*, double }*, %Array*, i64 }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, { %Array*, %Array*, double }*, %Array*, i64 }, { double, { %Array*, %Array*, double }*, %Array*, i64 }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, { %Array*, %Array*, double }*, %Array*, i64 }, { double, { %Array*, %Array*, double }*, %Array*, i64 }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store { %Array*, %Array*, double }* %4, { %Array*, %Array*, double }** %13, align 8 + store %Array* %7, %Array** %14, align 8 + store i64 %9, i64* %15, align 4 + %16 = getelementptr inbounds { %Callable*, double, { %Array*, %Array*, double }*, i64 }, { %Callable*, double, { %Array*, %Array*, double }*, i64 }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void 
@__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning__EstimateClassificationProbability__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, { %Array*, %Array*, double }*, %Array*, i64 }* + %1 = getelementptr inbounds { double, { %Array*, %Array*, double }*, %Array*, i64 }, { double, { %Array*, %Array*, double }*, %Array*, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, { %Array*, %Array*, double }*, %Array*, i64 }, { double, { %Array*, %Array*, double }*, %Array*, i64 }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, { %Array*, %Array*, double }*, %Array*, i64 }, { double, { %Array*, %Array*, double }*, %Array*, i64 }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, { %Array*, %Array*, double }*, %Array*, i64 }, { double, { %Array*, %Array*, double }*, %Array*, i64 }* %0, i32 0, i32 3 + %5 = load double, double* %1, align 8 + %6 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %2, align 8 + %7 = load %Array*, %Array** %3, align 8 + %8 = load i64, i64* %4, align 4 + %9 = call double @Microsoft__Quantum__MachineLearning__EstimateClassificationProbability__body(double %5, { %Array*, %Array*, double }* %6, %Array* %7, i64 %8) + %10 = bitcast %Tuple* %result-tuple to { double }* + %11 = getelementptr inbounds { double }, { double }* %10, i32 0, i32 0 + store double %9, double* %11, align 8 + ret void +} + +define internal void @MemoryManagement__11__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { %Array*, %Array*, double }*, i64 }* + %1 = getelementptr inbounds { %Callable*, double, { %Array*, %Array*, double }*, i64 }, { %Callable*, double, { %Array*, %Array*, double }*, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, { %Array*, %Array*, double }*, i64 }, { %Callable*, double, { %Array*, %Array*, double }*, i64 }* %0, i32 0, i32 2 + %4 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %3, align 8 + %5 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %4, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %9 = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %9) + %12 = bitcast i8* %11 to { { i64, %Array* }*, i2, i64 }** + %13 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %12, align 8 + %14 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %13, i32 0, i32 0 + %15 = load { i64, %Array* }*, { i64, %Array* }** %14, align 8 + %16 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %15, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + call void 
@__quantum__rt__array_update_reference_count(%Array* %17, i32 %count-change) + %18 = bitcast { i64, %Array* }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 %count-change) + %19 = bitcast { { i64, %Array* }*, i2, i64 }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %9, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 %count-change) + %21 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %4, i32 0, i32 1 + %22 = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %22, i32 %count-change) + %23 = bitcast { %Array*, %Array*, double }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__11__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, { %Array*, %Array*, double }*, i64 }* + %1 = getelementptr inbounds { %Callable*, double, { %Array*, %Array*, double }*, i64 }, { %Callable*, double, { %Array*, %Array*, double }*, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, { %Array*, %Array*, double }*, i64 }, { %Callable*, double, { %Array*, %Array*, double }*, i64 }* %0, i32 0, i32 2 + %4 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %3, align 8 + %5 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %4, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %6) + %8 = sub i64 %7, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %9 = phi i64 [ 0, %entry ], [ %20, %exiting__1 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %6, i64 %9) + %12 = bitcast i8* %11 to { { i64, %Array* }*, i2, i64 }** + %13 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %12, align 8 + %14 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %13, i32 0, i32 0 + %15 = load { i64, %Array* }*, { i64, %Array* }** %14, align 8 + %16 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %15, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 %count-change) + %18 = bitcast { i64, %Array* }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 %count-change) + %19 = bitcast { { i64, %Array* }*, i2, i64 }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %19, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %9, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 %count-change) + %21 = getelementptr 
inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %4, i32 0, i32 1 + %22 = load %Array*, %Array** %21, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %22, i32 %count-change) + %23 = bitcast { %Array*, %Array*, double }* %4 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %23, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal double @Microsoft__Quantum__MachineLearning__EstimateClassificationProbability__body(double %tolerance, { %Array*, %Array*, double }* %model, %Array* %sample, i64 %nMeasurements) { +entry: + %0 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %15, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { { i64, %Array* }*, i2, i64 }** + %8 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %7, align 8 + %9 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %8, i32 0, i32 0 + %10 = load { i64, %Array* }*, { i64, %Array* }** %9, align 8 + %11 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %10, i32 0, i32 1 + %12 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = bitcast { i64, %Array* }* %10 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %13, i32 1) + %14 = bitcast { { i64, %Array* }*, i2, i64 }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %15 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %model, i32 0, i32 1 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array*, %Array*, double }* %model to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %sample, i32 1) + %19 = sitofp i64 %2 to double + %20 = fdiv double %tolerance, %19 + %encodedSample = call { i64, %Callable* }* @Microsoft__Quantum__MachineLearning__ApproximateInputEncoder__body(double %20, %Array* %sample) + %21 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %encodedSample, i32 0, i32 1 + %22 = load %Callable*, %Callable** %21, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %22, i32 1) + %23 = bitcast { i64, %Callable* }* %encodedSample to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %23, i32 1) + %24 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__MachineLearning____QsRef0__PrepareClassification____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 1) + %25 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %26 = phi i64 [ 0, %exit__1 ], [ %37, %exiting__2 ] + %27 = icmp sle i64 %26, %25 + br i1 %27, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %26) + %29 = bitcast i8* %28 to { { i64, %Array* }*, i2, i64 }** + %30 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %29, align 8 + %31 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %30, i32 0, i32 0 + %32 = load { i64, %Array* }*, { i64, %Array* }** %31, align 8 + %33 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %32, i32 0, i32 1 + %34 = load %Array*, %Array** %33, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 1) + %35 = bitcast { i64, %Array* }* %32 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 1) + %36 = bitcast { { i64, %Array* }*, i2, i64 }* %30 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %37 = add i64 %26, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 1) + %38 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, { %Array*, %Array*, double }* }* getelementptr ({ %Callable*, %Callable*, { %Array*, %Array*, double }* }, { %Callable*, %Callable*, { %Array*, %Array*, double }* }* null, i32 1) to i64)) + %39 = bitcast %Tuple* %38 to { %Callable*, %Callable*, { %Array*, %Array*, double }* }* + %40 = getelementptr inbounds { %Callable*, %Callable*, { %Array*, %Array*, double }* }, { %Callable*, %Callable*, { %Array*, %Array*, double }* }* %39, i32 0, i32 0 + %41 = getelementptr inbounds { %Callable*, %Callable*, { %Array*, %Array*, double }* }, { %Callable*, %Callable*, { %Array*, %Array*, double }* }* %39, i32 0, i32 1 + %42 = getelementptr inbounds { %Callable*, %Callable*, { %Array*, %Array*, double }* }, { %Callable*, %Callable*, { %Array*, %Array*, double }* }* %39, i32 0, i32 2 + store %Callable* %24, %Callable** %40, align 8 + store %Callable* %22, %Callable** %41, align 8 + store { %Array*, %Array*, double }* %model, { %Array*, %Array*, double }** %42, align 8 + %43 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__13__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__12__FunctionTable, %Tuple* %38) + %44 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %encodedSample, i32 0, i32 0 + %45 = load i64, i64* %44, align 4 + %46 = call %Callable* @Microsoft__Quantum__MachineLearning____QsRef0__TailMeasurement____body(i64 %45) + %47 = call double @Microsoft__Quantum__Characterization__EstimateFrequencyA__body(%Callable* %43, %Callable* %46, i64 %45, i64 %nMeasurements) + %48 = fsub double 1.000000e+00, %47 + %49 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %50 = phi i64 [ 0, %exit__2 ], [ %61, %exiting__3 ] + %51 = icmp sle i64 %50, %49 + br i1 %51, label %body__3, 
label %exit__3 + +body__3: ; preds = %header__3 + %52 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %50) + %53 = bitcast i8* %52 to { { i64, %Array* }*, i2, i64 }** + %54 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %53, align 8 + %55 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %54, i32 0, i32 0 + %56 = load { i64, %Array* }*, { i64, %Array* }** %55, align 8 + %57 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %56, i32 0, i32 1 + %58 = load %Array*, %Array** %57, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 -1) + %59 = bitcast { i64, %Array* }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %59, i32 -1) + %60 = bitcast { { i64, %Array* }*, i2, i64 }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %60, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %61 = add i64 %50, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %sample, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %22, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %43, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %43, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %46, i32 -1) + ret double %48 +} + +define internal double @Microsoft__Quantum__Characterization__EstimateFrequencyA__body(%Callable* %preparation, %Callable* %measurement, i64 %nQubits, i64 %nMeasurements) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %measurement, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %measurement, i32 1) + %0 = call double @Microsoft__Quantum__Characterization__EstimateFrequency__body(%Callable* %preparation, %Callable* %measurement, i64 %nQubits, i64 %nMeasurements) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %measurement, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %measurement, i32 -1) + ret double %0 +} + +define internal void @Lifted__PartialApplication__13__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, { %Array*, %Array*, double }* }* + %1 = getelementptr inbounds { %Callable*, 
%Callable*, { %Array*, %Array*, double }* }, { %Callable*, %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Callable*, { %Array*, %Array*, double }* }, { %Callable*, %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 2 + %4 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array*, %Array*, double }*, %Array* }* getelementptr ({ %Callable*, { %Array*, %Array*, double }*, %Array* }, { %Callable*, { %Array*, %Array*, double }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, { %Array*, %Array*, double }*, %Array* }* + %10 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }*, %Array* }, { %Callable*, { %Array*, %Array*, double }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }*, %Array* }, { %Callable*, { %Array*, %Array*, double }*, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }*, %Array* }, { %Callable*, { %Array*, %Array*, double }*, %Array* }* %9, i32 0, i32 2 + store %Callable* %2, %Callable** %10, align 8 + store { %Array*, %Array*, double }* %4, { %Array*, %Array*, double }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, %Callable*, { %Array*, %Array*, double }* }, { %Callable*, %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__13__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, { %Array*, %Array*, double }* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, { %Array*, %Array*, double }* }, { %Callable*, %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = getelementptr inbounds { %Callable*, %Callable*, { %Array*, %Array*, double }* }, { %Callable*, %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 2 + %4 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %3, align 8 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array*, %Array*, double }*, %Array* }* getelementptr ({ %Callable*, { %Array*, %Array*, double }*, %Array* }, { %Callable*, { %Array*, %Array*, double }*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, { %Array*, %Array*, double }*, %Array* }* + %10 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }*, %Array* }, { %Callable*, { %Array*, %Array*, double }*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }*, %Array* }, { %Callable*, { %Array*, %Array*, double }*, %Array* }* %9, i32 0, i32 1 + %12 = 
getelementptr inbounds { %Callable*, { %Array*, %Array*, double }*, %Array* }, { %Callable*, { %Array*, %Array*, double }*, %Array* }* %9, i32 0, i32 2 + store %Callable* %2, %Callable** %10, align 8 + store { %Array*, %Array*, double }* %4, { %Array*, %Array*, double }** %11, align 8 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, %Callable*, { %Array*, %Array*, double }* }, { %Callable*, %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__PrepareClassification____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, { %Array*, %Array*, double }*, %Array* }* + %1 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }*, %Array* }, { %Callable*, { %Array*, %Array*, double }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }*, %Array* }, { %Callable*, { %Array*, %Array*, double }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }*, %Array* }, { %Callable*, { %Array*, %Array*, double }*, %Array* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %1, align 8 + %5 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__MachineLearning____QsRef0__PrepareClassification____body(%Callable* %4, { %Array*, %Array*, double }* %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning____QsRef0__PrepareClassification____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, { %Array*, %Array*, double }*, %Array* }* + %1 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }*, %Array* }, { %Callable*, { %Array*, %Array*, double }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }*, %Array* }, { %Callable*, { %Array*, %Array*, double }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { %Callable*, { %Array*, %Array*, double }*, %Array* }, { %Callable*, { %Array*, %Array*, double }*, %Array* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %1, align 8 + %5 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %2, align 8 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__MachineLearning____QsRef0__PrepareClassification____adj(%Callable* %4, { %Array*, %Array*, double }* %5, %Array* %6) + ret void +} + +define internal void @MemoryManagement__12__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, { %Array*, %Array*, double }* }* + %1 = getelementptr 
inbounds { %Callable*, %Callable*, { %Array*, %Array*, double }* }, { %Callable*, %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable*, { %Array*, %Array*, double }* }, { %Callable*, %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Callable*, { %Array*, %Array*, double }* }, { %Callable*, %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 2 + %6 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + %9 = call i64 @__quantum__rt__array_get_size_1d(%Array* %8) + %10 = sub i64 %9, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %11 = phi i64 [ 0, %entry ], [ %22, %exiting__1 ] + %12 = icmp sle i64 %11, %10 + br i1 %12, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 %11) + %14 = bitcast i8* %13 to { { i64, %Array* }*, i2, i64 }** + %15 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %14, align 8 + %16 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %15, i32 0, i32 0 + %17 = load { i64, %Array* }*, { i64, %Array* }** %16, align 8 + %18 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %17, i32 0, i32 1 + %19 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 %count-change) + %20 = bitcast { i64, %Array* }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 %count-change) + %21 = bitcast { { i64, %Array* }*, i2, i64 }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %22 = add i64 %11, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 %count-change) + %23 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %6, i32 0, i32 1 + %24 = load %Array*, %Array** %23, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 %count-change) + %25 = bitcast { %Array*, %Array*, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__12__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable*, { %Array*, %Array*, double }* }* + %1 = getelementptr inbounds { %Callable*, %Callable*, { %Array*, %Array*, double }* }, { %Callable*, %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 
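+ ; Mirrors MemoryManagement__12__RefCount above: applies %count-change to the alias counts of both captured callables and of every array/tuple nested in the captured { %Callable*, %Callable*, { %Array*, %Array*, double }* } model tuple.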
+ call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable*, { %Array*, %Array*, double }* }, { %Callable*, %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, %Callable*, { %Array*, %Array*, double }* }, { %Callable*, %Callable*, { %Array*, %Array*, double }* }* %0, i32 0, i32 2 + %6 = load { %Array*, %Array*, double }*, { %Array*, %Array*, double }** %5, align 8 + %7 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + %9 = call i64 @__quantum__rt__array_get_size_1d(%Array* %8) + %10 = sub i64 %9, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %11 = phi i64 [ 0, %entry ], [ %22, %exiting__1 ] + %12 = icmp sle i64 %11, %10 + br i1 %12, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 %11) + %14 = bitcast i8* %13 to { { i64, %Array* }*, i2, i64 }** + %15 = load { { i64, %Array* }*, i2, i64 }*, { { i64, %Array* }*, i2, i64 }** %14, align 8 + %16 = getelementptr inbounds { { i64, %Array* }*, i2, i64 }, { { i64, %Array* }*, i2, i64 }* %15, i32 0, i32 0 + %17 = load { i64, %Array* }*, { i64, %Array* }** %16, align 8 + %18 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %17, i32 0, i32 1 + %19 = load %Array*, %Array** %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 %count-change) + %20 = bitcast { i64, %Array* }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %20, i32 %count-change) + %21 = bitcast { { i64, %Array* }*, i2, i64 }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %22 = add i64 %11, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 %count-change) + %23 = getelementptr inbounds { %Array*, %Array*, double }, { %Array*, %Array*, double }* %6, i32 0, i32 1 + %24 = load %Array*, %Array** %23, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %24, i32 %count-change) + %25 = bitcast { %Array*, %Array*, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal i64 @Microsoft__Quantum__Math__Ceiling__body(double %value) { +entry: + %0 = call { i64, double, i1 }* @Microsoft__Quantum__Math____QsRef2__ExtendedTruncation____body(double %value) + %1 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %0, i32 0, i32 0 + %truncated = load i64, i64* %1, align 4 + %2 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %0, i32 0, i32 1 + %remainder = load double, double* %2, align 8 + %3 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %0, i32 0, i32 2 + %isPositive = load i1, i1* %3, align 1 + %4 = call double 
@Microsoft__Quantum__Math__AbsD__body(double %remainder) + %5 = fcmp ole double %4, 1.000000e-15 + br i1 %5, label %then0__1, label %else__1 + +then0__1: ; preds = %entry + %6 = bitcast { i64, double, i1 }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret i64 %truncated + +else__1: ; preds = %entry + br i1 %isPositive, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %7 = add i64 %truncated, 1 + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %8 = phi i64 [ %7, %condTrue__1 ], [ %truncated, %condFalse__1 ] + %9 = bitcast { i64, double, i1 }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret i64 %8 + +continue__1: ; No predecessors! + unreachable +} + +define internal i64 @Microsoft__Quantum__MachineLearning__InferredLabel__body(double %bias, double %probability) { +entry: + %0 = fadd double %probability, %bias + %1 = fcmp ogt double %0, 5.000000e-01 + %2 = select i1 %1, i64 1, i64 0 + ret i64 %2 +} + +define internal %Array* @Microsoft__Quantum__Arrays___d3aba77e00014a79bc8f48ec51f8fb2a_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = icmp eq i64 %length, 0 + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %1 + +continue__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %3 = bitcast i8* %2 to double* + %4 = load double, double* %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { double }* + %7 = getelementptr inbounds { double }, { double }* %6, i32 0, i32 0 + store double %4, double* %7, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %5, %Tuple* %8) + %9 = bitcast %Tuple* %8 to { i64 }* + %10 = getelementptr inbounds { i64 }, { i64 }* %9, i32 0, i32 0 + %first = load i64, i64* %10, align 4 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %12 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %13 = phi i64 [ 0, %continue__1 ], [ %17, %exiting__1 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %11, i64 %13) + %16 = bitcast i8* %15 to i64* + store i64 %first, i64* %16, align 4 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %13, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %11, %Array** 
%retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %18 = sub i64 %length, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idx = phi i64 [ 1, %exit__1 ], [ %34, %exiting__2 ] + %19 = icmp sle i64 %idx, %18 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1) + %21 = call %Array* @__quantum__rt__array_copy(%Array* %20, i1 false) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %23 = bitcast i8* %22 to double* + %24 = load double, double* %23, align 8 + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { double }* + %27 = getelementptr inbounds { double }, { double }* %26, i32 0, i32 0 + store double %24, double* %27, align 8 + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %25, %Tuple* %28) + %29 = bitcast %Tuple* %28 to { i64 }* + %30 = getelementptr inbounds { i64 }, { i64 }* %29, i32 0, i32 0 + %31 = load i64, i64* %30, align 4 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %idx) + %33 = bitcast i8* %32 to i64* + store i64 %31, i64* %33, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 1) + store %Array* %21, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %34 = add i64 %idx, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %35 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %35, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret %Array* %35 +} + +define internal void @Lifted__PartialApplication__14__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { double }* + %4 = getelementptr inbounds { double }, { double }* %3, i32 0, i32 0 + %5 = load double, double* %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, double }* + %8 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, double }, { double, double }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store double %5, double* %9, align 8 + %10 = getelementptr inbounds { %Callable*, 
double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__MachineLearning__InferredLabel__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, double }* + %1 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, double }, { double, double }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load double, double* %2, align 8 + %5 = call i64 @Microsoft__Quantum__MachineLearning__InferredLabel__body(double %3, double %4) + %6 = bitcast %Tuple* %result-tuple to { i64 }* + %7 = getelementptr inbounds { i64 }, { i64 }* %6, i32 0, i32 0 + store i64 %5, i64* %7, align 4 + ret void +} + +define internal void @MemoryManagement__13__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__13__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___0a5870f6f00b4e91a76081167d0bc275_Where__body(%Callable* %predicate, %Array* %array) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %predicate, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %predicate, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { i64, i64 }** + %6 = load { i64, i64 }*, { i64, i64 }** %5, align 8 + %7 = bitcast { i64, i64 }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___facc0657b0284c16ae2c0d999b143be0_Fst__FunctionTable, 
[2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___bfd0dc2872b54301bd24b64a2c23e89e_Snd__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %11 = call %Array* @Microsoft__Quantum__Arrays___847843f64261497cbcdec42c04c12cc1_Mapped__body(%Callable* %predicate, %Array* %array) + %12 = call %Array* @Microsoft__Quantum__Arrays___5ef583d7ebd84277a2b7db5af95f2088_Enumerated__body(%Array* %11) + %13 = call %Array* @Microsoft__Quantum__Arrays___4aa279afe82a49f18051c32a38c71fb7_Filtered__body(%Callable* %10, %Array* %12) + %14 = call %Array* @Microsoft__Quantum__Arrays___6c6f349b2d0c4e67b944e93ebc590a5b_Mapped__body(%Callable* %9, %Array* %13) + call void @__quantum__rt__capture_update_alias_count(%Callable* %predicate, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %predicate, i32 -1) + %15 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %16) + %19 = bitcast i8* %18 to { i64, i64 }** + %20 = load { i64, i64 }*, { i64, i64 }** %19, align 8 + %21 = bitcast { i64, i64 }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + %23 = call i64 @__quantum__rt__array_get_size_1d(%Array* %12) + %24 = sub i64 %23, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %25 = phi i64 [ 0, %exit__2 ], [ %31, %exiting__3 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %25) + %28 = bitcast i8* %27 to { i64, i1 }** + %29 = load { i64, i1 }*, { i64, i1 }** %28, align 8 + %30 = bitcast { i64, i1 }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %31 = add i64 %25, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + %32 = call i64 @__quantum__rt__array_get_size_1d(%Array* %13) + %33 = sub i64 %32, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %34 = phi i64 [ 0, %exit__3 ], [ %40, %exiting__4 ] + %35 = icmp sle i64 %34, %33 + br i1 %35, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %13, i64 %34) + %37 = bitcast i8* %36 to { i64, i1 }** + %38 = load { i64, i1 }*, { i64, i1 }** %37, align 8 + %39 = bitcast { i64, i1 }* %38 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %39, 
i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %40 = add i64 %34, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %13, i32 -1) + ret %Array* %14 +} + +define internal void @Microsoft__Quantum__Logical__NotEqualI__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, i64 }* + %1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 1 + %3 = load i64, i64* %1, align 4 + %4 = load i64, i64* %2, align 4 + %5 = call i1 @Microsoft__Quantum__Logical__NotEqualI__body(i64 %3, i64 %4) + %6 = bitcast %Tuple* %result-tuple to { i1 }* + %7 = getelementptr inbounds { i1 }, { i1 }* %6, i32 0, i32 0 + store i1 %5, i1* %7, align 1 + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___4676d5f0d26141cfa6e357563bd65669_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %2 = icmp slt i64 %0, %1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = icmp eq i64 %nElements, 0 + br i1 %3, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %4 + +continue__1: ; preds = %condContinue__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %6 = bitcast i8* %5 to i64* + %7 = load i64, i64* %6, align 4 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %9 = bitcast i8* %8 to i64* + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64 }* getelementptr ({ i64, i64 }, { i64, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i64, i64 }* + %13 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %12, i32 0, i32 1 + store i64 %7, i64* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %16 = sub i64 %nElements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %17 = phi i64 [ 0, %continue__1 ], [ %21, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %17) + %20 = bitcast i8* %19 to { i64, i64 }** + store { i64, i64 }* %12, { i64, i64 }** %20, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %15, %Array** 
%output, align 8 + %22 = sub i64 %nElements, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %23) + %26 = bitcast i8* %25 to { i64, i64 }** + %27 = load { i64, i64 }*, { i64, i64 }** %26, align 8 + %28 = bitcast { i64, i64 }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %30 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %48, %exiting__3 ] + %31 = icmp sle i64 %idxElement, %30 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %32 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %35 = bitcast i8* %34 to i64* + %36 = load i64, i64* %35, align 4 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %38 = bitcast i8* %37 to i64* + %39 = load i64, i64* %38, align 4 + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64 }* getelementptr ({ i64, i64 }, { i64, i64 }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i64, i64 }* + %42 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %41, i32 0, i32 1 + store i64 %36, i64* %42, align 4 + store i64 %39, i64* %43, align 4 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxElement) + %45 = bitcast i8* %44 to { i64, i64 }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %46 = load { i64, i64 }*, { i64, i64 }** %45, align 8 + %47 = bitcast { i64, i64 }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { i64, i64 }* %41, { i64, i64 }** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { i64, i64 }** + %56 = load { i64, i64 }*, { i64, i64 }** %55, align 8 + %57 = bitcast { i64, i64 }* 
%56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret %Array* %49 +} + +define internal i1 @Microsoft__Quantum__Logical__NotEqualI__body(i64 %a, i64 %b) { +entry: + %0 = icmp ne i64 %a, %b + ret i1 %0 +} + +define internal i64 @Microsoft__Quantum__Arrays___2d4fd0446d3a462ca520e9612ada343e_Fold__body(%Callable* %folder, i64 %state, %Array* %array) { +entry: + %current = alloca i64, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %folder, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %folder, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + store i64 %state, i64* %current, align 4 + %0 = call %Range @Microsoft__Quantum__Arrays___51e146e9e3f741e2a6043055f76ad080_IndexRange__body(%Array* %array) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %4 = icmp sgt i64 %2, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxElement = phi i64 [ %1, %preheader__1 ], [ %20, %exiting__1 ] + %5 = icmp sle i64 %idxElement, %3 + %6 = icmp sge i64 %idxElement, %3 + %7 = select i1 %4, i1 %5, i1 %6 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = load i64, i64* %current, align 4 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idxElement) + %10 = bitcast i8* %9 to i64* + %11 = load i64, i64* %10, align 4 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64 }* getelementptr ({ i64, i64 }, { i64, i64 }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i64, i64 }* + %14 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %13, i32 0, i32 1 + store i64 %8, i64* %14, align 4 + store i64 %11, i64* %15, align 4 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %folder, %Tuple* %12, %Tuple* %16) + %17 = bitcast %Tuple* %16 to { i64 }* + %18 = getelementptr inbounds { i64 }, { i64 }* %17, i32 0, i32 0 + %19 = load i64, i64* %18, align 4 + store i64 %19, i64* %current, align 4 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %idxElement, %2 + br label %header__1 + +exit__1: ; preds = %header__1 + %21 = load i64, i64* %current, align 4 + call void @__quantum__rt__capture_update_alias_count(%Callable* %folder, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %folder, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret i64 %21 +} + +define internal void @Microsoft__Quantum__Math__MaxI__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, i64 }* + %1 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, i64 }, 
{ i64, i64 }* %0, i32 0, i32 1 + %3 = load i64, i64* %1, align 4 + %4 = load i64, i64* %2, align 4 + %5 = call i64 @Microsoft__Quantum__Math__MaxI__body(i64 %3, i64 %4) + %6 = bitcast %Tuple* %result-tuple to { i64 }* + %7 = getelementptr inbounds { i64 }, { i64 }* %6, i32 0, i32 0 + store i64 %5, i64* %7, align 4 + ret void +} + +define internal %Array* @Microsoft__Quantum__MachineLearning___6dd27c99de61421cb8da3bf3154034a7_Sampled__body({ %Array* }* %schedule, %Array* %values) { +entry: + %sampled = alloca %Array*, align 8 + %0 = getelementptr inbounds { %Array* }, { %Array* }* %schedule, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to %Range* + %8 = load %Range, %Range* %7, align 4 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %10 = bitcast { %Array* }* %schedule to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %values) + %12 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %13) + %16 = bitcast i8* %15 to %Array** + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %18 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 1) + %19 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + store %Array* %19, %Array** %sampled, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %19, i32 1) + %20 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %21 = phi i64 [ 0, %exit__2 ], [ %28, %exiting__3 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %21) + %24 = bitcast i8* %23 to %Range* + %range = load %Range, %Range* %24, align 4 + %25 = extractvalue %Range %range, 0 + %26 = extractvalue %Range %range, 1 + %27 = extractvalue %Range %range, 2 + br label %preheader__1 + +exiting__3: ; preds = %exit__4 + %28 = add i64 %21, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %29 = load %Array*, %Array** %sampled, align 8 + %30 = sub i64 %2, 1 + br label %header__10 + +preheader__1: ; preds = %body__3 + %31 = icmp sgt i64 %26, 0 + br label %header__4 + +header__4: ; preds = %exiting__4, %preheader__1 + %index = phi i64 [ %25, %preheader__1 ], [ %45, %exiting__4 ] + %32 = icmp sle i64 %index, %27 + %33 = icmp sge i64 %index, %27 + %34 = select i1 %31, i1 %32, i1 %33 + br i1 %34, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %35 = load %Array*, 
%Array** %sampled, align 8 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %index) + %37 = bitcast i8* %36 to %Array** + %38 = load %Array*, %Array** %37, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %38, i32 1) + %39 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %39, i64 0) + %41 = bitcast i8* %40 to %Array** + store %Array* %38, %Array** %41, align 8 + %42 = call %Array* @__quantum__rt__array_concatenate(%Array* %35, %Array* %39) + %43 = call i64 @__quantum__rt__array_get_size_1d(%Array* %42) + %44 = sub i64 %43, 1 + br label %header__5 + +exiting__4: ; preds = %exit__9 + %45 = add i64 %index, %26 + br label %header__4 + +exit__4: ; preds = %header__4 + br label %exiting__3 + +header__5: ; preds = %exiting__5, %body__4 + %46 = phi i64 [ 0, %body__4 ], [ %51, %exiting__5 ] + %47 = icmp sle i64 %46, %44 + br i1 %47, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %42, i64 %46) + %49 = bitcast i8* %48 to %Array** + %50 = load %Array*, %Array** %49, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %50, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %51 = add i64 %46, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %42, i32 1) + %52 = sub i64 %43, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %53 = phi i64 [ 0, %exit__5 ], [ %58, %exiting__6 ] + %54 = icmp sle i64 %53, %52 + br i1 %54, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %55 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %42, i64 %53) + %56 = bitcast i8* %55 to %Array** + %57 = load %Array*, %Array** %56, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %57, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %58 = add i64 %53, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 1) + %59 = call i64 @__quantum__rt__array_get_size_1d(%Array* %35) + %60 = sub i64 %59, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %61 = phi i64 [ 0, %exit__6 ], [ %66, %exiting__7 ] + %62 = icmp sle i64 %61, %60 + br i1 %62, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %35, i64 %61) + %64 = bitcast i8* %63 to %Array** + %65 = load %Array*, %Array** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %66 = add i64 %61, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %35, i32 -1) + %67 = sub i64 %59, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %68 = phi i64 [ 0, %exit__7 ], [ %73, %exiting__8 ] + %69 = icmp sle i64 %68, %67 + br i1 %69, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %35, i64 %68) + %71 = bitcast i8* %70 to %Array** + %72 = load %Array*, %Array** %71, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %72, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %73 = add i64 %68, 1 + br label %header__8 + +exit__8: ; preds = 
%header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %35, i32 -1) + store %Array* %42, %Array** %sampled, align 8 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %74 = phi i64 [ 0, %exit__8 ], [ %79, %exiting__9 ] + %75 = icmp sle i64 %74, 0 + br i1 %75, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %76 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %39, i64 %74) + %77 = bitcast i8* %76 to %Array** + %78 = load %Array*, %Array** %77, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %78, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %79 = add i64 %74, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_reference_count(%Array* %39, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %42, i32 -1) + br label %exiting__4 + +header__10: ; preds = %exiting__10, %exit__3 + %80 = phi i64 [ 0, %exit__3 ], [ %85, %exiting__10 ] + %81 = icmp sle i64 %80, %30 + br i1 %81, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %82 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %80) + %83 = bitcast i8* %82 to %Range* + %84 = load %Range, %Range* %83, align 4 + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %85 = add i64 %80, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + %86 = sub i64 %11, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %87 = phi i64 [ 0, %exit__10 ], [ %92, %exiting__11 ] + %88 = icmp sle i64 %87, %86 + br i1 %88, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %89 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %87) + %90 = bitcast i8* %89 to %Array** + %91 = load %Array*, %Array** %90, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %91, i32 -1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %92 = add i64 %87, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 -1) + %93 = call i64 @__quantum__rt__array_get_size_1d(%Array* %29) + %94 = sub i64 %93, 1 + br label %header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %95 = phi i64 [ 0, %exit__11 ], [ %100, %exiting__12 ] + %96 = icmp sle i64 %95, %94 + br i1 %96, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %97 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %29, i64 %95) + %98 = bitcast i8* %97 to %Array** + %99 = load %Array*, %Array** %98, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %99, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %100 = add i64 %95, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %29, i32 -1) + ret %Array* %29 +} + +define internal %Array* @Microsoft__Quantum__MachineLearning___9df2eba66a764c8abe9e53ac519cccaa_Sampled__body({ %Array* }* %schedule, %Array* %values) { +entry: + %sampled = alloca %Array*, align 8 + %0 = getelementptr inbounds { %Array* }, { %Array* }* %schedule, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = 
%exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to %Range* + %8 = load %Range, %Range* %7, align 4 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %10 = bitcast { %Array* }* %schedule to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 1) + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + store %Array* %11, %Array** %sampled, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %20, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %13) + %16 = bitcast i8* %15 to %Range* + %range = load %Range, %Range* %16, align 4 + %17 = extractvalue %Range %range, 0 + %18 = extractvalue %Range %range, 1 + %19 = extractvalue %Range %range, 2 + br label %preheader__1 + +exiting__2: ; preds = %exit__3 + %20 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %21 = load %Array*, %Array** %sampled, align 8 + %22 = sub i64 %2, 1 + br label %header__4 + +preheader__1: ; preds = %body__2 + %23 = icmp sgt i64 %18, 0 + br label %header__3 + +header__3: ; preds = %exiting__3, %preheader__1 + %index = phi i64 [ %17, %preheader__1 ], [ %35, %exiting__3 ] + %24 = icmp sle i64 %index, %19 + %25 = icmp sge i64 %index, %19 + %26 = select i1 %23, i1 %24, i1 %25 + br i1 %26, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %27 = load %Array*, %Array** %sampled, align 8 + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %values, i64 %index) + %29 = bitcast i8* %28 to i64* + %30 = load i64, i64* %29, align 4 + %31 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %31, i64 0) + %33 = bitcast i8* %32 to i64* + store i64 %30, i64* %33, align 4 + %34 = call %Array* @__quantum__rt__array_concatenate(%Array* %27, %Array* %31) + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + store %Array* %34, %Array** %sampled, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %index, %18 + br label %header__3 + +exit__3: ; preds = %header__3 + br label %exiting__2 + +header__4: ; preds = %exiting__4, %exit__2 + %36 = phi i64 [ 0, %exit__2 ], [ %41, %exiting__4 ] + %37 = icmp sle i64 %36, %22 + br i1 %37, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %36) + %39 = bitcast i8* %38 to %Range* + %40 = load 
%Range, %Range* %39, align 4 + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %41 = add i64 %36, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %values, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 -1) + ret %Array* %21 +} + +define internal { i64, i64 }* @Microsoft__Quantum__MachineLearning__ValidationResults__body(i64 %NMisclassifications, i64 %NValidationSamples) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i64 }* getelementptr ({ i64, i64 }, { i64, i64 }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { i64, i64 }* + %2 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { i64, i64 }, { i64, i64 }* %1, i32 0, i32 1 + store i64 %NMisclassifications, i64* %2, align 4 + store i64 %NValidationSamples, i64* %3, align 4 + ret { i64, i64 }* %1 +} + +define internal void @Microsoft__Quantum__Characterization____QsRef1__ApplyHadamardTest____body(i1 %phaseShift, %Callable* %commonPreparation, %Callable* %preparation1, %Callable* %preparation2, %Qubit* %control, %Array* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %commonPreparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %commonPreparation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation1, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + call void @__quantum__qis__h__body(%Qubit* %control) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Array* }* + %2 = getelementptr inbounds { %Array* }, { %Array* }* %1, i32 0, i32 0 + store %Array* %target, %Array** %2, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %commonPreparation, %Tuple* %0, %Tuple* null) + %3 = call %Callable* @__quantum__rt__callable_copy(%Callable* %preparation1, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %3) + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 0) + %6 = bitcast i8* %5 to %Qubit** + store %Qubit* %control, %Qubit** %6, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %4, %Array** %9, align 8 + store %Array* %target, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %3, %Tuple* %7, %Tuple* null) + call void 
@__quantum__qis__x__body(%Qubit* %control) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %preparation2, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + %12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 0) + %14 = bitcast i8* %13 to %Qubit** + store %Qubit* %control, %Qubit** %14, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, %Array* }* + %17 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %16, i32 0, i32 1 + store %Array* %12, %Array** %17, align 8 + store %Array* %target, %Array** %18, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %15, %Tuple* null) + call void @__quantum__qis__x__body(%Qubit* %control) + br i1 %phaseShift, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %19 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + br label %condContinue__1 + +condFalse__1: ; preds = %entry + %20 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__I__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %21 = phi %Callable* [ %19, %condTrue__1 ], [ %20, %condFalse__1 ] + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %23 = bitcast %Tuple* %22 to { %Qubit* }* + %24 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %23, i32 0, i32 0 + store %Qubit* %control, %Qubit** %24, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %22, %Tuple* null) + call void @__quantum__qis__h__body(%Qubit* %control) + call void @__quantum__rt__capture_update_alias_count(%Callable* %commonPreparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %commonPreparation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* 
%11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + ret void +} + +declare void @__quantum__qis__h__body(%Qubit*) + +declare void @__quantum__qis__x__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__S__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__I__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__I__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__I__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__I__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__I__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = 
bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__I__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__I__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__I__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__s__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__s__adj(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__s__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__s__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__I__body(%Qubit* %target) { +entry: + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__I__adj(%Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__I__body(%Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__I__ctl(%Array* %__controlQubits__, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__I__ctladj(%Array* %__controlQubits__, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @Microsoft__Quantum__Intrinsic__I__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Characterization____QsRef1__ApplyHadamardTest____adj(i1 %phaseShift, %Callable* %commonPreparation, %Callable* %preparation1, %Callable* %preparation2, %Qubit* %control, %Array* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %commonPreparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %commonPreparation, i32 1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %preparation1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation1, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + call void @__quantum__qis__h__body(%Qubit* %control) + br i1 %phaseShift, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + br label %condContinue__1 + +condFalse__1: ; preds = %entry + %1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__I__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %2 = phi %Callable* [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = call %Callable* @__quantum__rt__callable_copy(%Callable* %2, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %3) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Qubit* }* + %6 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %5, i32 0, i32 0 + store %Qubit* %control, %Qubit** %6, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %3, %Tuple* %4, %Tuple* null) + call void @__quantum__qis__x__body(%Qubit* %control) + %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %preparation2, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %7) + call void @__quantum__rt__callable_make_adjoint(%Callable* %7) + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 0) + %10 = bitcast i8* %9 to %Qubit** + store %Qubit* %control, %Qubit** %10, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Array*, %Array* }* + %13 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 1 + store %Array* %8, %Array** %13, align 8 + store %Array* %target, %Array** %14, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %11, %Tuple* null) + call void @__quantum__qis__x__body(%Qubit* %control) + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %preparation1, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %15) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + %16 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %16, i64 0) + %18 = bitcast i8* 
%17 to %Qubit** + store %Qubit* %control, %Qubit** %18, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %20 = bitcast %Tuple* %19 to { %Array*, %Array* }* + %21 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 0 + %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 1 + store %Array* %16, %Array** %21, align 8 + store %Array* %target, %Array** %22, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %19, %Tuple* null) + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %commonPreparation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %23) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { %Array* }* + %26 = getelementptr inbounds { %Array* }, { %Array* }* %25, i32 0, i32 0 + store %Array* %target, %Array** %26, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %24, %Tuple* null) + call void @__quantum__qis__h__body(%Qubit* %control) + call void @__quantum__rt__capture_update_alias_count(%Callable* %commonPreparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %commonPreparation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Characterization____QsRef1__ApplyHadamardTestOnSingleRegister____body(i1 %phaseShift, %Callable* %commonPreparation, %Callable* %preparation1, %Callable* %preparation2, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %commonPreparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %commonPreparation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation1, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %control = call %Qubit* @Microsoft__Quantum__Arrays___8fb37798f2ee45efaa2fd297b4d00535_Head__body(%Array* %register) + %target = call %Array* @Microsoft__Quantum__Arrays___c6242f92bf314764965b3b33dc50fbb0_Rest__body(%Array* %register) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + call void @Microsoft__Quantum__Characterization____QsRef1__ApplyHadamardTest____body(i1 %phaseShift, %Callable* %commonPreparation, %Callable* %preparation1, %Callable* %preparation2, %Qubit* %control, %Array* %target) + call void @__quantum__rt__capture_update_alias_count(%Callable* %commonPreparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %commonPreparation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + ret void +} + +define internal %Qubit* @Microsoft__Quantum__Arrays___8fb37798f2ee45efaa2fd297b4d00535_Head__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = icmp sgt i64 %0, 0 + %2 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([39 x i8], [39 x i8]* @18, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %1, i1 true, %String* %2) + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %4 = bitcast i8* %3 to %Qubit** + %5 = load %Qubit*, %Qubit** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + ret %Qubit* %5 +} + +define internal %Array* @Microsoft__Quantum__Arrays___c6242f92bf314764965b3b33dc50fbb0_Rest__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + %2 = insertvalue %Range { i64 1, i64 1, i64 0 }, i64 %1, 2 + %3 = call %Array* @__quantum__rt__array_slice_1d(%Array* %array, %Range %2, i1 true) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + ret %Array* %3 +} + +define internal void @Microsoft__Quantum__Characterization____QsRef1__ApplyHadamardTestOnSingleRegister____adj(i1 %phaseShift, %Callable* %commonPreparation, %Callable* %preparation1, %Callable* %preparation2, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %commonPreparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %commonPreparation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation1, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation2, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %__qsVar0__control__ = call %Qubit* @Microsoft__Quantum__Arrays___8fb37798f2ee45efaa2fd297b4d00535_Head__body(%Array* %register) + %__qsVar1__target__ = call %Array* @Microsoft__Quantum__Arrays___c6242f92bf314764965b3b33dc50fbb0_Rest__body(%Array* %register) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__target__, i32 1) + call void @Microsoft__Quantum__Characterization____QsRef1__ApplyHadamardTest____adj(i1 %phaseShift, %Callable* %commonPreparation, %Callable* %preparation1, %Callable* %preparation2, %Qubit* %__qsVar0__control__, %Array* %__qsVar1__target__) + call void @__quantum__rt__capture_update_alias_count(%Callable* %commonPreparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %commonPreparation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation1, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation2, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__target__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__target__, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Characterization____QsRef1__HeadMeasurement____body(i64 %nQubits) { +entry: + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Measure__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___bf1b92e74b5c49e78cf7e075bdb2fa6e_ConstantArray__body(i64 %nQubits, i2 0) + %2 = call %Array* @__quantum__rt__array_copy(%Array* %1, i1 false) + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %2, i64 0) + %4 = bitcast i8* %3 to i2* + store i2 -2, i2* %4, align 1 + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* 
}, { %Callable*, %Array* }* %6, i32 0, i32 1 + store %Callable* %0, %Callable** %7, align 8 + store %Array* %2, %Array** %8, align 8 + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__15__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__7__FunctionTable, %Tuple* %5) + ret %Callable* %9 +} + +define internal void @Lifted__PartialApplication__15__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___bf1b92e74b5c49e78cf7e075bdb2fa6e_ConstantArray__body(i64 %length, i2 %value) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %length) + %1 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2) + %5 = bitcast i8* %4 to i2* + store i2 %value, i2* %5, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + ret %Array* %0 +} + +define internal double @Microsoft__Quantum__Characterization__EstimateFrequency__body(%Callable* %preparation, %Callable* %measurement, i64 %nQubits, i64 %nMeasurements) { +entry: + %nUp = alloca i64, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %measurement, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %measurement, i32 1) + store i64 0, i64* %nUp, align 4 + %0 = sub i64 %nMeasurements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %idxMeasurement = phi i64 [ 0, %entry ], [ %16, %exiting__1 ] + %1 = icmp sle i64 %idxMeasurement, %0 + br i1 %1, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %register = call %Array* @__quantum__rt__qubit_allocate_array(i64 %nQubits) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ 
%Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %3 = bitcast %Tuple* %2 to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + store %Array* %register, %Array** %4, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %preparation, %Tuple* %2, %Tuple* null) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Array* }* + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + store %Array* %register, %Array** %7, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Result* }* getelementptr ({ %Result* }, { %Result* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %measurement, %Tuple* %5, %Tuple* %8) + %9 = bitcast %Tuple* %8 to { %Result* }* + %10 = getelementptr inbounds { %Result* }, { %Result* }* %9, i32 0, i32 0 + %result = load %Result*, %Result** %10, align 8 + %11 = call %Result* @__quantum__rt__result_get_zero() + %12 = call i1 @__quantum__rt__result_equal(%Result* %result, %Result* %11) + br i1 %12, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + %13 = load i64, i64* %nUp, align 4 + %14 = add i64 %13, 1 + store i64 %14, i64* %nUp, align 4 + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__1 + %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Reset__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @Microsoft__Quantum__Canon___b87f7893639e4ac88fbb8d0a3ba1cbe1_ApplyToEach__body(%Callable* %15, %Array* %register) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %register) + br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %16 = add i64 %idxMeasurement, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %17 = load i64, i64* %nUp, align 4 + %18 = sitofp i64 %17 to double + %19 = sitofp i64 %nMeasurements to double + %20 = fdiv double %18, %19 + call void @__quantum__rt__capture_update_alias_count(%Callable* %preparation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %preparation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %measurement, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %measurement, i32 -1) + ret double %20 +} + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__rt__qubit_release_array(%Array*) + +declare %Result* @__quantum__rt__result_get_zero() + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +define internal void @Microsoft__Quantum__Canon___b87f7893639e4ac88fbb8d0a3ba1cbe1_ApplyToEach__body(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call %Range @Microsoft__Quantum__Arrays___fca6ec94ae3342ea8545ea7003b87bf1_IndexRange__body(%Array* %register) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %4 = icmp sgt i64 %2, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxQubit = phi i64 [ %1, %preheader__1 ], [ %14, %exiting__1 ] + %5 = icmp sle i64 %idxQubit, %3 + %6 = icmp sge i64 %idxQubit, %3 + %7 = select i1 %4, i1 %5, i1 %6 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Qubit* }* + %13 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %12, i32 0, i32 0 + store %Qubit* %10, %Qubit** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %singleElementOperation, %Tuple* %11, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %14 = add i64 %idxQubit, %2 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Reset__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %2) + ret void +} + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) + +define internal void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %qubit) { +entry: + %0 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) + %1 = call %Result* @__quantum__rt__result_get_one() + %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %qubit) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +define internal void @Lifted__PartialApplication__16__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i1, %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load i1, i1* %1, align 1 + %3 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, 
%Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 3 + %6 = load %Callable*, %Callable** %5, align 8 + %7 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 4 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Array* }* + %10 = getelementptr inbounds { %Array* }, { %Array* }* %9, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, %Callable*, %Callable*, %Callable*, %Array* }* getelementptr ({ i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i1, %Callable*, %Callable*, %Callable*, %Array* }* + %14 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %13, i32 0, i32 4 + store i1 %2, i1* %14, align 1 + store %Callable* %4, %Callable** %15, align 8 + store %Callable* %6, %Callable** %16, align 8 + store %Callable* %8, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__16__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i1, %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load i1, i1* %1, align 1 + %3 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + %5 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 3 + %6 = load %Callable*, %Callable** %5, align 8 + %7 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 4 + %8 = load %Callable*, %Callable** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { 
%Array* }* + %10 = getelementptr inbounds { %Array* }, { %Array* }* %9, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, %Callable*, %Callable*, %Callable*, %Array* }* getelementptr ({ i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i1, %Callable*, %Callable*, %Callable*, %Array* }* + %14 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %13, i32 0, i32 4 + store i1 %2, i1* %14, align 1 + store %Callable* %4, %Callable** %15, align 8 + store %Callable* %6, %Callable** %16, align 8 + store %Callable* %8, %Callable** %17, align 8 + store %Array* %11, %Array** %18, align 8 + %19 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Characterization____QsRef1__ApplyHadamardTestOnSingleRegister____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i1, %Callable*, %Callable*, %Callable*, %Array* }* + %1 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %0, i32 0, i32 4 + %6 = load i1, i1* %1, align 1 + %7 = load %Callable*, %Callable** %2, align 8 + %8 = load %Callable*, %Callable** %3, align 8 + %9 = load %Callable*, %Callable** %4, align 8 + %10 = load %Array*, %Array** %5, align 8 + call void 
@Microsoft__Quantum__Characterization____QsRef1__ApplyHadamardTestOnSingleRegister____body(i1 %6, %Callable* %7, %Callable* %8, %Callable* %9, %Array* %10) + ret void +} + +define internal void @Microsoft__Quantum__Characterization____QsRef1__ApplyHadamardTestOnSingleRegister____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i1, %Callable*, %Callable*, %Callable*, %Array* }* + %1 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { i1, %Callable*, %Callable*, %Callable*, %Array* }, { i1, %Callable*, %Callable*, %Callable*, %Array* }* %0, i32 0, i32 4 + %6 = load i1, i1* %1, align 1 + %7 = load %Callable*, %Callable** %2, align 8 + %8 = load %Callable*, %Callable** %3, align 8 + %9 = load %Callable*, %Callable** %4, align 8 + %10 = load %Array*, %Array** %5, align 8 + call void @Microsoft__Quantum__Characterization____QsRef1__ApplyHadamardTestOnSingleRegister____adj(i1 %6, %Callable* %7, %Callable* %8, %Callable* %9, %Array* %10) + ret void +} + +define internal void @MemoryManagement__14__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i1, %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 3 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 %count-change) + %7 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 4 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void 
@MemoryManagement__14__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, i1, %Callable*, %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 2 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 3 + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 %count-change) + %7 = getelementptr inbounds { %Callable*, i1, %Callable*, %Callable*, %Callable* }, { %Callable*, i1, %Callable*, %Callable*, %Callable* }* %0, i32 0, i32 4 + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal i1 @Microsoft__Quantum__Canon____QsRef1__AnyOutsideToleranceCP____body(double %tolerance, %Array* %coefficients) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %10) + %13 = bitcast i8* %12 to { double, double }** + %coefficient = load { double, double }*, { double, double }** %13, align 8 + %14 = bitcast { double, double }* %coefficient to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %coefficient) + %16 = fcmp ogt double 
%15, %tolerance + br i1 %16, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__2 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + %17 = sub i64 %0, 1 + br label %header__3 + +continue__1: ; preds = %body__2 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %continue__1 + %18 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %19 = sub i64 %0, 1 + br label %header__4 + +header__3: ; preds = %exiting__3, %then0__1 + %20 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__3 ] + %21 = icmp sle i64 %20, %17 + br i1 %21, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %20) + %23 = bitcast i8* %22 to { double, double }** + %24 = load { double, double }*, { double, double }** %23, align 8 + %25 = bitcast { double, double }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %26 = add i64 %20, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 true + +header__4: ; preds = %exiting__4, %exit__2 + %27 = phi i64 [ 0, %exit__2 ], [ %33, %exiting__4 ] + %28 = icmp sle i64 %27, %19 + br i1 %28, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %27) + %30 = bitcast i8* %29 to { double, double }** + %31 = load { double, double }*, { double, double }** %30, align 8 + %32 = bitcast { double, double }* %31 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %32, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %33 = add i64 %27, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 false +} + +define internal double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 0 + %2 = load double, double* %1, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %2 +} + +define internal i1 @Microsoft__Quantum__Canon____QsRef1__AnyOutsideToleranceD____body(double %tolerance, %Array* %coefficients) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to double* + %coefficient = load double, double* %5, align 8 + %6 = call double @Microsoft__Quantum__Math__AbsD__body(double %coefficient) + %7 = fcmp oge double %6, %tolerance + br i1 %7, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 true + +continue__1: ; preds = %body__1 + 
br label %exiting__1 + +exiting__1: ; preds = %continue__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + ret i1 false +} + +define internal { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef1__MultiplexZCoefficients____body(%Array* %coefficients) { +entry: + %coefficients1 = alloca %Array*, align 8 + %coefficients0 = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %newCoefficientsLength = sdiv i64 %0, 2 + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %newCoefficientsLength) + %2 = sub i64 %newCoefficientsLength, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %3) + %6 = bitcast i8* %5 to double* + store double 0.000000e+00, double* %6, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %1, %Array** %coefficients0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %8 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %newCoefficientsLength) + %9 = sub i64 %newCoefficientsLength, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %14, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 %10) + %13 = bitcast i8* %12 to double* + store double 0.000000e+00, double* %13, align 8 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %14 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + store %Array* %8, %Array** %coefficients1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %15 = sub i64 %newCoefficientsLength, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxCoeff = phi i64 [ 0, %exit__2 ], [ %44, %exiting__3 ] + %16 = icmp sle i64 %idxCoeff, %15 + br i1 %16, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %17 = load %Array*, %Array** %coefficients0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + %18 = call %Array* @__quantum__rt__array_copy(%Array* %17, i1 false) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %idxCoeff) + %20 = bitcast i8* %19 to double* + %21 = load double, double* %20, align 8 + %22 = add i64 %idxCoeff, %newCoefficientsLength + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %22) + %24 = bitcast i8* %23 to double* + %25 = load double, double* %24, align 8 + %26 = fadd double %21, %25 + %27 = fmul double 5.000000e-01, %26 + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %idxCoeff) + %29 = bitcast i8* %28 to double* + store double %27, double* %29, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + store %Array* %18, %Array** %coefficients0, align 8 + %30 = load %Array*, %Array** %coefficients1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* 
%30, i32 -1)
+  %31 = call %Array* @__quantum__rt__array_copy(%Array* %30, i1 false)
+  %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %idxCoeff)
+  %33 = bitcast i8* %32 to double*
+  %34 = load double, double* %33, align 8
+  %35 = add i64 %idxCoeff, %newCoefficientsLength
+  %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %35)
+  %37 = bitcast i8* %36 to double*
+  %38 = load double, double* %37, align 8
+  %39 = fsub double %34, %38
+  %40 = fmul double 5.000000e-01, %39
+  %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %31, i64 %idxCoeff)
+  %42 = bitcast i8* %41 to double*
+  %43 = load double, double* %42, align 8
+  store double %40, double* %42, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %31, i32 1)
+  store %Array* %31, %Array** %coefficients1, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %30, i32 -1)
+  br label %exiting__3
+
+exiting__3: ; preds = %body__3
+  %44 = add i64 %idxCoeff, 1
+  br label %header__3
+
+exit__3: ; preds = %header__3
+  %45 = load %Array*, %Array** %coefficients0, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %45, i32 1)
+  %46 = load %Array*, %Array** %coefficients1, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %46, i32 1)
+  %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64))
+  %48 = bitcast %Tuple* %47 to { %Array*, %Array* }*
+  %49 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %48, i32 0, i32 0
+  %50 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %48, i32 0, i32 1
+  store %Array* %45, %Array** %49, align 8
+  store %Array* %46, %Array** %50, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %45, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %46, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %45, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %46, i32 -1)
+  ret { %Array*, %Array* }* %48
+}
+
+; Applies a diagonal unitary from rotation coefficients: multiplexes a Z rotation onto the tail qubit, then either applies the leftover global-phase exponential (two padded coefficients) or recurses on the leading qubits.
+define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__body(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+  %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0
+  %qubits__1 = load %Array*, %Array** %0, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1)
+  %1 = bitcast { %Array* }* %qubits to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1)
+  %2 = call i1 @Microsoft__Quantum__Arrays___6ad2c538b55a4168b115e6cf3c7d63de_IsEmpty__body(%Array* %qubits__1)
+  br i1 %2, label %then0__1, label %continue__1
+
+then0__1: ; preds = %entry
+  %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @12, i32 0, i32 0))
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1)
+  call void @__quantum__rt__fail(%String* %3)
+  unreachable
+
+continue__1: ; preds = %entry
+  %4 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1)
+  %5 = trunc i64 %4 to i32
+  %6 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %5)
+  %7 = fptosi double %6 to i64
+  %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___61d0534fed56455782ee55a16a1606c3_Padded__body(i64 %7, double 0.000000e+00, %Array* %coefficients)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1)
+  %8 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef1__MultiplexZCoefficients____body(%Array* %coefficientsPadded)
+  %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0
+  %coefficients0 = load %Array*, %Array** %9, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1)
+  %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1
+  %coefficients1 = load %Array*, %Array** %10, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1)
+  %11 = call %Array* @Microsoft__Quantum__Arrays___dfadd79ae46c48ca8fb83bba76dc75a9_Most__body(%Array* %qubits__1)
+  %12 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %11)
+  %13 = call %Qubit* @Microsoft__Quantum__Arrays___ec2282f4e2d04e26843a4c1b80ee157c_Tail__body(%Array* %qubits__1)
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients1, { %Array* }* %12, %Qubit* %13)
+  %14 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded)
+  %15 = icmp eq i64 %14, 2
+  br i1 %15, label %then0__2, label %else__1
+
+then0__2: ; preds = %continue__1
+  %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0)
+  %17 = bitcast i8* %16 to double*
+  %18 = load double, double* %17, align 8
+  %19 = call double @Microsoft__Quantum__Math__AbsD__body(double %18)
+  %20 = fcmp ogt double %19, %tolerance
+  br i1 %20, label %then0__3, label %continue__3
+
+then0__3: ; preds = %then0__2
+  %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1)
+  %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0)
+  %22 = bitcast i8* %21 to i2*
+  store i2 0, i2* %22, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1)
+  %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0)
+  %24 = bitcast i8* %23 to double*
+  %25 = load double, double* %24, align 8
+  %theta = fmul double 1.000000e+00, %25
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1)
+  call void @__quantum__qis__exp__body(%Array* %paulis, double %theta, %Array* %qubits__1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  br label %continue__3
+
+continue__3: ; preds = %then0__3, %then0__2
+  br label %continue__2
+
+else__1: ; preds = %continue__1
+  %26 = call %Array* @Microsoft__Quantum__Arrays___dfadd79ae46c48ca8fb83bba76dc75a9_Most__body(%Array* %qubits__1)
+  %27 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %26)
+  call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__body(double %tolerance, %Array* %coefficients0, { %Array* }* %27)
+  %28 = getelementptr inbounds { %Array* }, { %Array* }* %27, i32 0, i32 0
+  %29 = load %Array*, %Array** %28, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %26, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1)
+  %30 = bitcast { %Array* }* %27 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1)
+  br label %continue__2
+
+continue__2: ; preds = %else__1, %continue__3
+  %31 = getelementptr inbounds { %Array* }, { %Array* }* %12, i32 0, i32 0
+  %32 = load %Array*, %Array** %31, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1)
+  %33 = bitcast { %Array*, %Array* }* %8 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1)
+  %34 = bitcast { %Array* }* %12 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1)
+  ret void
+}
+
+; Returns true iff the array has length zero.
+define internal i1 @Microsoft__Quantum__Arrays___6ad2c538b55a4168b115e6cf3c7d63de_IsEmpty__body(%Array* %array) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1)
+  %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array)
+  %1 = icmp eq i64 %0, 0
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1)
+  ret i1 %1
+}
+
+; Pads the input array with %defaultElement up to |%nElementsTotal| elements, at the head when the requested total is non-negative and at the tail otherwise.
+define internal %Array* @Microsoft__Quantum__Arrays___61d0534fed56455782ee55a16a1606c3_Padded__body(i64 %nElementsTotal, double %defaultElement, %Array* %inputArray) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 1)
+  %nElementsInitial = call i64 @__quantum__rt__array_get_size_1d(%Array* %inputArray)
+  %nAbsElementsTotal = call i64 @Microsoft__Quantum__Math__AbsI__body(i64 %nElementsTotal)
+  %0 = icmp sge i64 %nAbsElementsTotal, %nElementsInitial
+  %1 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([71 x i8], [71 x i8]* @19, i32 0, i32 0))
+  call void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %0, i1 true, %String* %1)
+  %nElementsPad = sub i64 %nAbsElementsTotal, %nElementsInitial
+  %padArray = call %Array* @Microsoft__Quantum__Arrays___d7a5dace8d00477d9dcdf9020a467709_ConstantArray__body(i64 %nElementsPad, double %defaultElement)
+  call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 1)
+  %2 = icmp sge i64 %nElementsTotal, 0
+  br i1 %2, label %condTrue__1, label %condFalse__1
+
+condTrue__1: ; preds = %entry
+  %3 = call %Array* @__quantum__rt__array_concatenate(%Array* %padArray, %Array* %inputArray)
+  call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1)
+  br label %condContinue__1
+
+condFalse__1: ; preds = %entry
+  %4 = call %Array* @__quantum__rt__array_concatenate(%Array* %inputArray, %Array* %padArray)
+  call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1)
+  br label %condContinue__1
+
+condContinue__1: ; preds = %condFalse__1, %condTrue__1
+  %5 = phi %Array* [ %3, %condTrue__1 ], [ %4, %condFalse__1 ]
+  call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %padArray, i32 -1)
+  ret %Array* %5
+}
+
+; Function Attrs: nofree nosync nounwind readnone speculatable willreturn
+declare double @llvm.powi.f64.i32(double, i32) #0
+
+; Multiplexed Z rotation: a single padded coefficient becomes exp(i*theta*Z) on the target; otherwise the split coefficient halves recurse on Most(control), with the difference half conjugated by a CNOT pair from Tail(control) when it exceeds the tolerance.
+define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients, { %Array* }* %control, %Qubit* %target) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+  %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0
+  %1 = load %Array*, %Array** %0, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1)
+  %2 = bitcast { %Array* }* %control to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1)
+  %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1)
+  %4 = trunc i64 %3 to i32
+  %5 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %4)
+  %6 = fptosi double %5 to i64
+  %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___61d0534fed56455782ee55a16a1606c3_Padded__body(i64 %6, double 0.000000e+00, %Array* %coefficients)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1)
+  %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded)
+  %8 = icmp eq i64 %7, 1
+  br i1 %8, label %then0__1, label %else__1
+
+then0__1: ; preds = %entry
+  %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 0)
+  %10 = bitcast i8* %9 to double*
+  %11 = load double, double* %10, align 8
+  %12 = call double @Microsoft__Quantum__Math__AbsD__body(double %11)
+  %13 = fcmp ogt double %12, %tolerance
+  br i1 %13, label %then0__2, label %continue__2
+
+then0__2: ; preds = %then0__1
+  %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1)
+  %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0)
+  %15 = bitcast i8* %14 to i2*
+  store i2 -2, i2* %15, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1)
+  %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficientsPadded, i64 0)
+  %17 = bitcast i8* %16 to double*
+  %theta = load double, double* %17, align 8
+  %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0)
+  %19 = bitcast i8* %18 to %Qubit**
+  store %Qubit* %target, %Qubit** %19, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  call void @__quantum__qis__exp__body(%Array* %paulis, double %theta, %Array* %qubits)
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1)
+  br label %continue__2
+
+continue__2: ; preds = %then0__2, %then0__1
+  br label %continue__1
+
+else__1: ; preds = %entry
+  %20 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef1__MultiplexZCoefficients____body(%Array* %coefficientsPadded)
+  %21 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 0
+  %coefficients0 = load %Array*, %Array** %21, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1)
+  %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 1
+  %coefficients1 = load %Array*, %Array** %22, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1)
+  %23 = call %Array* @Microsoft__Quantum__Arrays___dfadd79ae46c48ca8fb83bba76dc75a9_Most__body(%Array* %1)
+  %24 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %23)
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients0, { %Array* }* %24, %Qubit* %target)
+  %25 = call i1 @Microsoft__Quantum__Canon____QsRef1__AnyOutsideToleranceD____body(double %tolerance, %Array* %coefficients1)
+  br i1 %25, label %then0__3, label %continue__3
+
+then0__3: ; preds = %else__1
+  %26 = call %Qubit* @Microsoft__Quantum__Arrays___ec2282f4e2d04e26843a4c1b80ee157c_Tail__body(%Array* %1)
+  call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %26, %Qubit* %target)
+  %27 = call %Array* @Microsoft__Quantum__Arrays___dfadd79ae46c48ca8fb83bba76dc75a9_Most__body(%Array* %1)
+  %28 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %27)
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients1, { %Array* }* %28, %Qubit* %target)
+  %29 = call %Qubit* @Microsoft__Quantum__Arrays___ec2282f4e2d04e26843a4c1b80ee157c_Tail__body(%Array* %1)
+  call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %29, %Qubit* %target)
+  %30 = getelementptr inbounds { %Array* }, { %Array* }* %28, i32 0, i32 0
+  %31 = load %Array*, %Array** %30, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 -1)
+  %32 = bitcast { %Array* }* %28 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1)
+  br label %continue__3
+
+continue__3: ; preds = %then0__3, %else__1
+  %33 = getelementptr inbounds { %Array* }, { %Array* }* %24, i32 0, i32 0
+  %34 = load %Array*, %Array** %33, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1)
+  %35 = bitcast { %Array*, %Array* }* %20 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1)
+  %36 = bitcast { %Array* }* %24 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1)
+  br label %continue__1
+
+continue__1: ; preds = %continue__3, %continue__2
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1)
+  ret void
+}
+
+; Returns all elements of the array except the last (slice [0 .. Length - 2]).
+define internal %Array* @Microsoft__Quantum__Arrays___dfadd79ae46c48ca8fb83bba76dc75a9_Most__body(%Array* %array) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1)
+  %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array)
+  %1 = sub i64 %0, 2
+  %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2
+  %3 = call %Array* @__quantum__rt__array_slice_1d(%Array* %array, %Range %2, i1 true)
+  call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1)
+  ret %Array* %3
+}
+
+; Returns the last element of the array, failing via EqualityFactB when the array is empty.
+define internal %Qubit* @Microsoft__Quantum__Arrays___ec2282f4e2d04e26843a4c1b80ee157c_Tail__body(%Array* %array) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1)
+  %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array)
+  %1 = icmp sgt i64 %0, 0
+  %2 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([39 x i8], [39 x i8]* @18, i32 0, i32 0))
+  call void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %1, i1 true, %String* %2)
+  %3 = sub i64 %0, 1
+  %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %3)
+  %5 = bitcast i8* %4 to %Qubit**
+  %6 = load %Qubit*, %Qubit** %5, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1)
+  call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1)
+  ret %Qubit* %6
+}
+
+declare void @__quantum__qis__exp__body(%Array*, double, %Array*)
+
+; Adjoint specialization: the body's steps with adjoint exp and multiplex-Z calls.
+define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__adj(double %tolerance, %Array* %coefficients, { %Array* }* %qubits) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+  %0 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0
+  %qubits__1 = load %Array*, %Array** %0, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1)
+  %1 = bitcast { %Array* }* %qubits to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1)
+  %2 = call i1 @Microsoft__Quantum__Arrays___6ad2c538b55a4168b115e6cf3c7d63de_IsEmpty__body(%Array* %qubits__1)
+  br i1 %2, label %then0__1, label %continue__1
+
+then0__1: ; preds = %entry
+  %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @12, i32 0, i32 0))
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1)
+  call void @__quantum__rt__fail(%String* %3)
+  unreachable
+
+continue__1: ; preds = %entry
+  %4 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1)
+  %5 = trunc i64 %4 to i32
+  %6 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %5)
+  %7 = fptosi double %6 to i64
+  %__qsVar0__coefficientsPadded__ = call %Array* @Microsoft__Quantum__Arrays___61d0534fed56455782ee55a16a1606c3_Padded__body(i64 %7, double 0.000000e+00, %Array* %coefficients)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1)
+  %8 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef1__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__)
+  %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0
+  %__qsVar1__coefficients0__ = load %Array*, %Array** %9, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1)
+  %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1
+  %__qsVar2__coefficients1__ = load %Array*, %Array** %10, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1)
+  %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsPadded__)
+  %12 = icmp eq i64 %11, 2
+  br i1 %12, label %then0__2, label %else__1
+
+then0__2: ; preds = %continue__1
+  %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0)
+  %14 = bitcast i8* %13 to double*
+  %15 = load double, double* %14, align 8
+  %16 = call double @Microsoft__Quantum__Math__AbsD__body(double %15)
+  %17 = fcmp ogt double %16, %tolerance
+  br i1 %17, label %then0__3, label %continue__3
+
+then0__3: ; preds = %then0__2
+  %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1)
+  %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0)
+  %19 = bitcast i8* %18 to i2*
+  store i2 0, i2* %19, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1)
+  %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0)
+  %21 = bitcast i8* %20 to double*
+  %22 = load double, double* %21, align 8
+  %theta = fmul double 1.000000e+00, %22
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1)
+  call void @__quantum__qis__exp__adj(%Array* %paulis, double %theta, %Array* %qubits__1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  br label %continue__3
+
+continue__3: ; preds = %then0__3, %then0__2
+  br label %continue__2
+
+else__1: ; preds = %continue__1
+  %23 = call %Array* @Microsoft__Quantum__Arrays___dfadd79ae46c48ca8fb83bba76dc75a9_Most__body(%Array* %qubits__1)
+  %24 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %23)
+  call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__adj(double %tolerance, %Array* %__qsVar1__coefficients0__, { %Array* }* %24)
+  %25 = getelementptr inbounds { %Array* }, { %Array* }* %24, i32 0, i32 0
+  %26 = load %Array*, %Array** %25, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %23, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %26, i32 -1)
+  %27 = bitcast { %Array* }* %24 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1)
+  br label %continue__2
+
+continue__2: ; preds = %else__1, %continue__3
+  %28 = call %Array* @Microsoft__Quantum__Arrays___dfadd79ae46c48ca8fb83bba76dc75a9_Most__body(%Array* %qubits__1)
+  %29 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %28)
+  %30 = call %Qubit* @Microsoft__Quantum__Arrays___ec2282f4e2d04e26843a4c1b80ee157c_Tail__body(%Array* %qubits__1)
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar2__coefficients1__, { %Array* }* %29, %Qubit* %30)
+  %31 = getelementptr inbounds { %Array* }, { %Array* }* %29, i32 0, i32 0
+  %32 = load %Array*, %Array** %31, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1)
+  %33 = bitcast { %Array*, %Array* }* %8 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %28, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1)
+  %34 = bitcast { %Array* }* %29 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1)
+  ret void
+}
+
+declare void @__quantum__qis__exp__adj(%Array*, double, %Array*)
+
+; Adjoint specialization of the multiplexed Z rotation.
+define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %coefficients, { %Array* }* %control, %Qubit* %target) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+  %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0
+  %1 = load %Array*, %Array** %0, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1)
+  %2 = bitcast { %Array* }* %control to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1)
+  %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1)
+  %4 = trunc i64 %3 to i32
+  %5 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %4)
+  %6 = fptosi double %5 to i64
+  %__qsVar0__coefficientsPadded__ = call %Array* @Microsoft__Quantum__Arrays___61d0534fed56455782ee55a16a1606c3_Padded__body(i64 %6, double 0.000000e+00, %Array* %coefficients)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1)
+  %7 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsPadded__)
+  %8 = icmp eq i64 %7, 1
+  br i1 %8, label %then0__1, label %else__1
+
+then0__1: ; preds = %entry
+  %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsPadded__, i64 0)
+  %10 = bitcast i8* %9 to double*
+  %11 = load double, double* %10, align 8
+  %12 = call double @Microsoft__Quantum__Math__AbsD__body(double %11)
+  %13 = fcmp ogt double %12, %tolerance
+  br i1 %13, label %then0__2, label %continue__2
+
+then0__2: ; preds = %then0__1
+  %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1)
+  %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0)
+  %15 = bitcast i8* %14 to i2*
+  store i2 -2, i2* %15, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1)
+  %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar0__coefficientsPadded__, i64 0)
+  %17 = bitcast i8* %16 to double*
+  %theta = load double, double* %17, align 8
+  %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1)
+  %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0)
+  %19 = bitcast i8* %18 to %Qubit**
+  store %Qubit* %target, %Qubit** %19, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1)
+  call void @__quantum__qis__exp__adj(%Array* %paulis, double %theta, %Array* %qubits)
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1)
+  br label %continue__2
+
+continue__2: ; preds = %then0__2, %then0__1
+  br label %continue__1
+
+else__1: ; preds = %entry
+  %20 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef1__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__)
+  %21 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 0
+  %__qsVar1__coefficients0__ = load %Array*, %Array** %21, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1)
+  %22 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %20, i32 0, i32 1
+  %__qsVar2__coefficients1__ = load %Array*, %Array** %22, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1)
+  %23 = call i1 @Microsoft__Quantum__Canon____QsRef1__AnyOutsideToleranceD____body(double %tolerance, %Array* %__qsVar2__coefficients1__)
+  br i1 %23, label %then0__3, label %continue__3
+
+then0__3: ; preds = %else__1
+  %24 = call %Qubit* @Microsoft__Quantum__Arrays___ec2282f4e2d04e26843a4c1b80ee157c_Tail__body(%Array* %1)
+  call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %24, %Qubit* %target)
+  %25 = call %Array* @Microsoft__Quantum__Arrays___dfadd79ae46c48ca8fb83bba76dc75a9_Most__body(%Array* %1)
+  %26 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %25)
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar2__coefficients1__, { %Array* }* %26, %Qubit* %target)
+  %27 = call %Qubit* @Microsoft__Quantum__Arrays___ec2282f4e2d04e26843a4c1b80ee157c_Tail__body(%Array* %1)
+  call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %27, %Qubit* %target)
+  %28 = getelementptr inbounds { %Array* }, { %Array* }* %26, i32 0, i32 0
+  %29 = load %Array*, %Array** %28, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %25, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %29, i32 -1)
+  %30 = bitcast { %Array* }* %26 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1)
+  br label %continue__3
+
+continue__3: ; preds = %then0__3, %else__1
+  %31 = call %Array* @Microsoft__Quantum__Arrays___dfadd79ae46c48ca8fb83bba76dc75a9_Most__body(%Array* %1)
+  %32 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %31)
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar1__coefficients0__, { %Array* }* %32, %Qubit* %target)
+  %33 = getelementptr inbounds { %Array* }, { %Array* }* %32, i32 0, i32 0
+  %34 = load %Array*, %Array** %33, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1)
+  %35 = bitcast { %Array*, %Array* }* %20 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %31, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1)
+  %36 = bitcast { %Array* }* %32 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1)
+  br label %continue__1
+
+continue__1: ; preds = %continue__3, %continue__2
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1)
+  ret void
+}
+
+; Controlled specialization: the body's structure with each exp and multiplex-Z call controlled on %__controlQubits__.
+define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0
+  %tolerance = load double, double* %1, align 8
+  %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1
+  %coefficients = load %Array*, %Array** %2, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+  %3 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2
+  %qubits = load { %Array* }*, { %Array* }** %3, align 8
+  %4 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0
+  %qubits__1 = load %Array*, %Array** %4, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1)
+  %5 = bitcast { %Array* }* %qubits to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1)
+  %6 = call i1 @Microsoft__Quantum__Arrays___6ad2c538b55a4168b115e6cf3c7d63de_IsEmpty__body(%Array* %qubits__1)
+  br i1 %6, label %then0__1, label %continue__1
+
+then0__1: ; preds = %entry
+  %7 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @12, i32 0, i32 0))
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1)
+  call void @__quantum__rt__fail(%String* %7)
+  unreachable
+
+continue__1: ; preds = %entry
+  %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1)
+  %9 = trunc i64 %8 to i32
+  %10 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %9)
+  %11 = fptosi double %10 to i64
+  %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___61d0534fed56455782ee55a16a1606c3_Padded__body(i64 %11, double 0.000000e+00, %Array* %coefficients)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1)
+  %12 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef1__MultiplexZCoefficients____body(%Array* %coefficientsPadded)
+  %13 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 0
+  %coefficients0 = load %Array*, %Array** %13, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1)
+  %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 1
+  %coefficients1 = load %Array*, %Array** %14, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 1)
+  %15 = call %Array* @Microsoft__Quantum__Arrays___dfadd79ae46c48ca8fb83bba76dc75a9_Most__body(%Array* %qubits__1)
+  %16 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %15)
+  call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1)
+  %17 = call %Qubit* @Microsoft__Quantum__Arrays___ec2282f4e2d04e26843a4c1b80ee157c_Tail__body(%Array* %qubits__1)
+  %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %19 = bitcast %Tuple* %18 to { double, %Array*, { %Array* }*, %Qubit* }*
+  %20 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 0
+  %21 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 1
+  %22 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 2
+  %23 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %19, i32 0, i32 3
+  store double %tolerance, double* %20, align 8
+  store %Array* %coefficients1, %Array** %21, align 8
+  store { %Array* }* %16, { %Array* }** %22, align 8
+  store %Qubit* %17, %Qubit** %23, align 8
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }*, %Qubit* }* %19)
+  %24 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficientsPadded)
+  %25 = icmp eq i64 %24, 2
+  br i1 %25, label %then0__2, label %else__1
+
+then0__2: ; preds = %continue__1
+  %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0)
+  %27 = bitcast i8* %26 to double*
+  %28 = load double, double* %27, align 8
+  %29 = call double @Microsoft__Quantum__Math__AbsD__body(double %28)
+  %30 = fcmp ogt double %29, %tolerance
+  br i1 %30, label %then0__3, label %continue__3
+
+then0__3: ; preds = %then0__2
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1)
+  %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0)
+  %32 = bitcast i8* %31 to i2*
+  store i2 0, i2* %32, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1)
+  %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients0, i64 0)
+  %34 = bitcast i8* %33 to double*
+  %35 = load double, double* %34, align 8
+  %theta = fmul double 1.000000e+00, %35
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1)
+  %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64))
+  %37 = bitcast %Tuple* %36 to { %Array*, double, %Array* }*
+  %38 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %37, i32 0, i32 0
+  %39 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %37, i32 0, i32 1
+  %40 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %37, i32 0, i32 2
+  store %Array* %paulis, %Array** %38, align 8
+  store double %theta, double* %39, align 8
+  store %Array* %qubits__1, %Array** %40, align 8
+  call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %37)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1)
+  br label %continue__3
+
+continue__3: ; preds = %then0__3, %then0__2
+  br label %continue__2
+
+else__1: ; preds = %continue__1
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 1)
+  %41 = call %Array* @Microsoft__Quantum__Arrays___dfadd79ae46c48ca8fb83bba76dc75a9_Most__body(%Array* %qubits__1)
+  %42 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %41)
+  call void @__quantum__rt__array_update_reference_count(%Array* %41, i32 -1)
+  %43 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64))
+  %44 = bitcast %Tuple* %43 to { double, %Array*, { %Array* }* }*
+  %45 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %44, i32 0, i32 0
+  %46 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %44, i32 0, i32 1
+  %47 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %44, i32 0, i32 2
+  store double %tolerance, double* %45, align 8
+  store %Array* %coefficients0, %Array** %46, align 8
+  store { %Array* }* %42, { %Array* }** %47, align 8
+  call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %44)
+  %48 = getelementptr inbounds { %Array* }, { %Array* }* %42, i32 0, i32 0
+  %49 = load %Array*, %Array** %48, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %49, i32 -1)
+  %50 = bitcast { %Array* }* %42 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %50, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 -1)
+  br label %continue__2
+
+continue__2: ; preds = %else__1, %continue__3
+  %51 = getelementptr inbounds { %Array* }, { %Array* }* %16, i32 0, i32 0
+  %52 = load %Array*, %Array** %51, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1)
+  %53 = bitcast { %Array*, %Array* }* %12 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %52, i32 -1)
+  %54 = bitcast { %Array* }* %16 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %54, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1)
+  ret void
+}
+
+; Controlled specialization: doubles the coefficient padding so the recursion absorbs the control register, sandwiching the difference half between controlled-X gates.
+define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl(%Array* %controlRegister, { double, %Array*, { %Array* }*, %Qubit* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1)
+  %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0
+  %tolerance = load double, double* %1, align 8
+  %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1
+  %coefficients = load %Array*, %Array** %2, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+  %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2
+  %control = load { %Array* }*, { %Array* }** %3, align 8
+  %4 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0
+  %5 = load %Array*, %Array** %4, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1)
+  %6 = bitcast { %Array* }* %control to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1)
+  %7 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 3
+  %target = load %Qubit*, %Qubit** %7, align 8
+  %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5)
+  %9 = add i64 %8, 1
+  %10 = trunc i64 %9 to i32
+  %11 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %10)
+  %12 = fptosi double %11 to i64
+  %13 = trunc i64 %8 to i32
+  %14 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %13)
+  %15 = fptosi double %14 to i64
+  %16 = call %Array* @Microsoft__Quantum__Arrays___61d0534fed56455782ee55a16a1606c3_Padded__body(i64 %15, double 0.000000e+00, %Array* %coefficients)
+  %coefficientsPadded = call %Array* @Microsoft__Quantum__Arrays___61d0534fed56455782ee55a16a1606c3_Padded__body(i64 %12, double 0.000000e+00, %Array* %16)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 1)
+  %17 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef1__MultiplexZCoefficients____body(%Array* %coefficientsPadded)
+  %18 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 0
+  %coefficients0 = load %Array*, %Array** %18, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 1)
+  %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 1
+  %coefficients1 = load %Array*, %Array** %19, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 1)
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients0, { %Array* }* %control, %Qubit* %target)
+  %20 = call i1 @Microsoft__Quantum__Canon____QsRef1__AnyOutsideToleranceD____body(double %tolerance, %Array* %coefficients1)
+  br i1 %20, label %then0__1, label %continue__1
+
+then0__1: ; preds = %entry
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1)
+  call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target)
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1)
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %tolerance, %Array* %coefficients1, { %Array* }* %control, %Qubit* %target)
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1)
+  call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target)
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1)
+  br label %continue__1
+
+continue__1: ; preds = %then0__1, %entry
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficientsPadded, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients0, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficientsPadded, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients0, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients1, i32 -1)
+  %21 = bitcast { %Array*, %Array* }* %17 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1)
+  ret void
+}
+
+declare void @__quantum__qis__exp__ctl(%Array*, { %Array*, double, %Array* }*)
+
+; Controlled-adjoint specialization of ApproximatelyApplyDiagonalUnitary.
+define internal void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %1 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 0
+  %tolerance = load double, double* %1, align 8
+  %2 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 1
+  %coefficients = load %Array*, %Array** %2, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+  %3 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %0, i32 0, i32 2
+  %qubits = load { %Array* }*, { %Array* }** %3, align 8
+  %4 = getelementptr inbounds { %Array* }, { %Array* }* %qubits, i32 0, i32 0
+  %qubits__1 = load %Array*, %Array** %4, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1)
+  %5 = bitcast { %Array* }* %qubits to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1)
+  %6 = call i1 @Microsoft__Quantum__Arrays___6ad2c538b55a4168b115e6cf3c7d63de_IsEmpty__body(%Array* %qubits__1)
+  br i1 %6, label %then0__1, label %continue__1
+
+then0__1: ; preds = %entry
+  %7 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([75 x i8], [75 x i8]* @12, i32 0, i32 0))
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1)
+  call void @__quantum__rt__fail(%String* %7)
+  unreachable
+
+continue__1: ; preds = %entry
+  %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits__1)
+  %9 = trunc i64 %8 to i32
+  %10 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %9)
+  %11 = fptosi double %10 to i64
+  %__qsVar0__coefficientsPadded__ = call %Array* @Microsoft__Quantum__Arrays___61d0534fed56455782ee55a16a1606c3_Padded__body(i64 %11, double 0.000000e+00, %Array* %coefficients)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1)
+  %12 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef1__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__)
+  %13 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 0
+  %__qsVar1__coefficients0__ = load %Array*, %Array** %13, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1)
+  %14 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %12, i32 0, i32 1
+  %__qsVar2__coefficients1__ = load %Array*, %Array** %14, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1)
+  %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %__qsVar0__coefficientsPadded__)
+  %16 = icmp eq i64 %15, 2
+  br i1 %16, label %then0__2, label %else__1
+
+then0__2: ; preds = %continue__1
+  %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0)
+  %18 = bitcast i8* %17 to double*
+  %19 = load double, double* %18, align 8
+  %20 = call double @Microsoft__Quantum__Math__AbsD__body(double %19)
+  %21 = fcmp ogt double %20, %tolerance
+  br i1 %21, label %then0__3, label %continue__3
+
+then0__3: ; preds = %then0__2
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1)
+  %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1)
+  %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0)
+  %23 = bitcast i8* %22 to i2*
+  store i2 0, i2* %23, align 1
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1)
+  %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__qsVar1__coefficients0__, i64 0)
+  %25 = bitcast i8* %24 to double*
+  %26 = load double, double* %25, align 8
+  %theta = fmul double 1.000000e+00, %26
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 1)
+  %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64))
+  %28 = bitcast %Tuple* %27 to { %Array*, double, %Array* }*
+  %29 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %28, i32 0, i32 0
+  %30 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %28, i32 0, i32 1
+  %31 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %28, i32 0, i32 2
+  store %Array* %paulis, %Array** %29, align 8
+  store double %theta, double* %30, align 8
+  store %Array* %qubits__1, %Array** %31, align 8
+  call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %28)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1)
+  br label %continue__3
+
+continue__3: ; preds = %then0__3, %then0__2
+  br label %continue__2
+
+else__1: ; preds = %continue__1
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 1)
+  %32 = call %Array* @Microsoft__Quantum__Arrays___dfadd79ae46c48ca8fb83bba76dc75a9_Most__body(%Array* %qubits__1)
+  %33 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %32)
+  call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1)
+  %34 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64))
+  %35 = bitcast %Tuple* %34 to { double, %Array*, { %Array* }* }*
+  %36 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %35, i32 0, i32 0
+  %37 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %35, i32 0, i32 1
+  %38 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %35, i32 0, i32 2
+  store double %tolerance, double* %36, align 8
+  store %Array* %__qsVar1__coefficients0__, %Array** %37, align 8
+  store { %Array* }* %33, { %Array* }** %38, align 8
+  call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %35)
+  %39 = getelementptr inbounds { %Array* }, { %Array* }* %33, i32 0, i32 0
+  %40 = load %Array*, %Array** %39, align 8
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %40, i32 -1)
+  %41 = bitcast { %Array* }* %33 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1)
+  br label %continue__2
+
+continue__2: ; preds = %else__1, %continue__3
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 1)
+  %42 = call %Array* @Microsoft__Quantum__Arrays___dfadd79ae46c48ca8fb83bba76dc75a9_Most__body(%Array* %qubits__1)
+  %43 = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %42)
+  call void @__quantum__rt__array_update_reference_count(%Array* %42, i32 -1)
+  %44 = call %Qubit* @Microsoft__Quantum__Arrays___ec2282f4e2d04e26843a4c1b80ee157c_Tail__body(%Array* %qubits__1)
+  %45 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %46 = bitcast %Tuple* %45 to { double, %Array*, { %Array* }*, %Qubit* }*
+  %47 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 0
+  %48 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 1
+  %49 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 2
+  %50 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %46, i32 0, i32 3
+  store double %tolerance, double* %47, align 8
+  store %Array* %__qsVar2__coefficients1__, %Array** %48, align 8
+  store { %Array* }* %43, { %Array* }** %49, align 8
+  store %Qubit* %44, %Qubit** %50, align 8
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }*, %Qubit* }* %46)
+  %51 = getelementptr inbounds { %Array* }, { %Array* }* %43, i32 0, i32 0
+  %52 = load %Array*, %Array** %51, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %qubits__1, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1)
+  %53 = bitcast { %Array*, %Array* }* %12 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %52, i32 -1)
+  %54 = bitcast { %Array* }* %43 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %54, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %45, i32 -1)
+  ret void
+}
+
+declare void @__quantum__qis__exp__ctladj(%Array*, { %Array*, double, %Array* }*)
+
+; Controlled-adjoint specialization of ApproximatelyMultiplexZ.
+define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj(%Array* %controlRegister, { double, %Array*, { %Array* }*, %Qubit* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1)
+  %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0
+  %tolerance = load double, double* %1, align 8
+  %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1
+  %coefficients = load %Array*, %Array** %2, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+  %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2
+  %control = load { %Array* }*, { %Array* }** %3, align 8
+  %4 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0
+  %5 = load %Array*, %Array** %4, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1)
+  %6 = bitcast { %Array* }* %control to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1)
+  %7 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 3
+  %target = load %Qubit*, %Qubit** %7, align 8
+  %8 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5)
+  %9 = add i64 %8, 1
+  %10 = trunc i64 %9 to i32
+  %11 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %10)
+  %12 = fptosi double %11 to i64
+  %13 = trunc i64 %8 to i32
+  %14 = call double @llvm.powi.f64.i32(double -2.000000e+00, i32 %13)
+  %15 = fptosi double %14 to i64
+  %16 = call %Array* @Microsoft__Quantum__Arrays___61d0534fed56455782ee55a16a1606c3_Padded__body(i64 %15, double 0.000000e+00, %Array* %coefficients)
+  %__qsVar0__coefficientsPadded__ = call %Array* @Microsoft__Quantum__Arrays___61d0534fed56455782ee55a16a1606c3_Padded__body(i64 %12, double 0.000000e+00, %Array* %16)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 1)
+  %17 = call { %Array*, %Array* }* @Microsoft__Quantum__Canon____QsRef1__MultiplexZCoefficients____body(%Array* %__qsVar0__coefficientsPadded__)
+  %18 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 0
+  %__qsVar1__coefficients0__ = load %Array*, %Array** %18, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 1)
+  %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %17, i32 0, i32 1
+  %__qsVar2__coefficients1__ = load %Array*, %Array** %19, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 1)
+  %20 = call i1 @Microsoft__Quantum__Canon____QsRef1__AnyOutsideToleranceD____body(double %tolerance, %Array* %__qsVar2__coefficients1__)
+  br i1 %20, label %then0__1, label %continue__1
+
+then0__1: ; preds = %entry
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1)
+  call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target)
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1)
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar2__coefficients1__, { %Array* }* %control, %Qubit* %target)
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1)
+  call void @__quantum__qis__x__ctl(%Array* %controlRegister, %Qubit* %target)
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1)
+  br label %continue__1
+
+continue__1: ; preds = %then0__1, %entry
+  call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %tolerance, %Array* %__qsVar1__coefficients0__, { %Array* }* %control, %Qubit* %target)
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 -1)
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar1__coefficients0__, i32 -1)
+  call void @__quantum__rt__array_update_alias_count(%Array* %__qsVar2__coefficients1__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %16, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar0__coefficientsPadded__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar1__coefficients0__, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %__qsVar2__coefficients1__, i32 -1)
+  %21 = bitcast { %Array*, %Array* }* %17 to %Tuple*
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1)
+  ret void
+}
+
+; Multiplexed rotation about an arbitrary Pauli axis: Z dispatches to ApproximatelyMultiplexZ; X recurses as a multiplexed Z conjugated by H; Y recurses as a multiplexed X conjugated by the callable constructed below.
+define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body(double %tolerance, %Array* %coefficients, i2 %pauli, { %Array* }* %control, %Qubit* %target) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1)
+  %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0
+  %1 = load %Array*, %Array** %0, align 8
+  call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1)
+  %2 = bitcast { %Array* }* %control to %Tuple*
+  call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1)
+  %3 = icmp eq i2 %pauli, -2
+  br i1 %3, label %then0__1, label %test1__1
+
+then0__1: ; preds = %entry
+  %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1)
+  %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64))
+  %6 = bitcast %Tuple* %5 to { %Callable*, double, %Array*, { %Array* }* }*
+  %7 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 0
+  %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 1
+  %9 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 2
+  %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 3
+  store %Callable* %4, %Callable** %7, align 8
+  store double %tolerance, double* %8, align 8
+  store %Array* %coefficients, %Array** %9, align 8
+  store { %Array* }* %control, { %Array* }** %10, align 8
+  %op = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__17__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__15__FunctionTable, %Tuple* %5)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1)
+  %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64))
+  %12 = bitcast %Tuple* %11 to { %Qubit* }*
+  %13 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %12, i32 0, i32 0
+  store %Qubit* %target, %Qubit** %13, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %11, %Tuple* null)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1)
+  br label %continue__1
+
+test1__1: ; preds = %entry
+  %14 = icmp eq i2 %pauli, 1
+  br i1 %14, label %then1__1, label %test2__1
+
+then1__1: ; preds = %test1__1
+  %15 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1)
+  %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64))
+  %17 = bitcast %Tuple* %16 to { %Callable*, double, %Array*, i2, { %Array* }* }*
+  %18 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 0
+  %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 1
+  %20 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 2
+  %21 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 3
+  %22 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %17, i32 0, i32 4
+  store %Callable* %15, %Callable** %18, align 8
+  store double %tolerance, double* %19, align 8
+  store %Array* %coefficients, %Array** %20, align 8
+  store i2 -2, i2* %21, align 1
+  store { %Array* }* %control, { %Array* }** %22, align 8
+  %op__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__18__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__16__FunctionTable, %Tuple* %16)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 1)
+  %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  call void @Microsoft__Quantum__Canon___17891ac5051847878c757c806b3b7604_ApplyWithCA__body(%Callable* %23, %Callable* %op__1, %Qubit* %target)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1)
+  br label %continue__1
+
+test2__1: ; preds = %test1__1
+  %24 = icmp eq i2 %pauli, -1
+  br i1 %24, label %then2__1, label %test3__1
+
+then2__1: ; preds = %test2__1
+  %25 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null)
+  call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1)
+  %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64))
+  %27 = bitcast %Tuple* %26 to { %Callable*, double, %Array*, i2, { %Array* }* }*
+  %28 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 0
+  %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 1
+  %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 2
+  %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 3
+  %32 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %27, i32 0, i32 4
+  store %Callable* %25, %Callable** %28, align 8
+  store double %tolerance, double* %29, align 8
+  store %Array* %coefficients, %Array** %30, align 8
+  store i2 1, i2* %31, align 1
+  store { %Array* }* %control, { %Array* }** %32, align 8
+  %op__2 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__19__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__16__FunctionTable, %Tuple* %26)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 1)
+  %33 = call %Callable* @__quantum__rt__callable_create([4
x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %33) + call void @Microsoft__Quantum__Canon___17891ac5051847878c757c806b3b7604_ApplyWithCA__body(%Callable* %33, %Callable* %op__2, %Qubit* %target) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %34 = icmp eq i2 %pauli, 0 + br i1 %34, label %then3__1, label %else__1 + +then3__1: ; preds = %test3__1 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__body(double %tolerance, %Array* %coefficients, { %Array* }* %control) + br label %continue__1 + +else__1: ; preds = %test3__1 + %35 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @13, i32 0, i32 0)) + %36 = icmp eq i2 1, %pauli + br i1 %36, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %37 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @14, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %38 = icmp eq i2 -1, %pauli + br i1 %38, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %39 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @15, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %40 = icmp eq i2 -2, %pauli + br i1 %40, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @16, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %42 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @17, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %43 = phi %String* [ %41, %condTrue__3 ], [ %42, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %44 = phi %String* [ %39, %condTrue__2 ], [ %43, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %45 = phi %String* [ %37, %condTrue__1 ], [ %44, %condContinue__2 ] + %46 = call %String* @__quantum__rt__string_concatenate(%String* %35, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + %47 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @3, i32 0, i32 0)) + %48 = call %String* @__quantum__rt__string_concatenate(%String* %46, %String* %47) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call 
void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__fail(%String* %48) + unreachable + +continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__17__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__17__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* 
}* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__17__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { 
double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__17__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* 
}* %0, i32 0, i32 3 + %5 = load double, double* %1, align 8 + %6 = load %Array*, %Array** %2, align 8 + %7 = load { %Array* }*, { %Array* }** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__body(double %5, %Array* %6, { %Array* }* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %5 = load double, double* %1, align 8 + %6 = load %Array*, %Array** %2, align 8 + %7 = load { %Array* }*, { %Array* }** %3, align 8 + %8 = load %Qubit*, %Qubit** %4, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__adj(double %5, %Array* %6, { %Array* }* %7, %Qubit* %8) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, { %Array* }*, %Qubit* }*, { double, %Array*, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctl(%Array* %3, { double, %Array*, { %Array* }*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, { %Array* }*, %Qubit* }*, { double, %Array*, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__ctladj(%Array* %3, { double, %Array*, { %Array* }*, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__15__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 
= load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__15__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Lifted__PartialApplication__18__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + 
%8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__18__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, 
i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__18__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { 
double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__18__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, 
%Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, 
%Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load %Array*, %Array** %2, align 8 + %8 = load i2, i2* %3, align 1 + %9 = load { %Array* }*, { %Array* }** %4, align 8 + %10 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body(double %6, %Array* %7, i2 %8, { %Array* }* %9, %Qubit* %10) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load %Array*, %Array** %2, align 8 + %8 = load i2, i2* %3, align 1 + %9 = load { %Array* }*, { %Array* }** %4, align 8 + %10 = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj(double %6, %Array* %7, i2 %8, { %Array* }* %9, %Qubit* %10) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, i2, { %Array* }*, %Qubit* }*, { double, %Array*, i2, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl(%Array* %3, { double, %Array*, i2, { %Array* }*, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { 
%Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, i2, { %Array* }*, %Qubit* }*, { double, %Array*, i2, { %Array* }*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj(%Array* %3, { double, %Array*, i2, { %Array* }*, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__16__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__16__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = getelementptr inbounds { %Array* }, { %Array* }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 %count-change) + %9 = bitcast { %Array* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* 
%capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___17891ac5051847878c757c806b3b7604_ApplyWithCA__body(%Callable* %outerOperation, %Callable* %innerOperation, %Qubit* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Qubit* }* + %2 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %1, i32 0, i32 0 + store %Qubit* %target, %Qubit** %2, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %outerOperation, %Tuple* %0, %Tuple* null) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Qubit* }* + %5 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %4, i32 0, i32 0 + store %Qubit* %target, %Qubit** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %innerOperation, %Tuple* %3, %Tuple* null) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Qubit* }* + %9 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %8, i32 0, i32 0 + store %Qubit* %target, %Qubit** %9, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void 
@Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Lifted__PartialApplication__19__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, 
{ %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__19__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__19__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, 
i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__19__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 
+ %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj(double %tolerance, %Array* %coefficients, i2 %pauli, { %Array* }* %control, %Qubit* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = icmp eq i2 %pauli, -2 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %4 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, double, %Array*, { %Array* }* }* + %7 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, 
i32 1 + %9 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 2 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %6, i32 0, i32 3 + store %Callable* %4, %Callable** %7, align 8 + store double %tolerance, double* %8, align 8 + store %Array* %coefficients, %Array** %9, align 8 + store { %Array* }* %control, { %Array* }** %10, align 8 + %__qsVar0__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__20__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__15__FunctionTable, %Tuple* %5) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %11) + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Qubit* }* + %14 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %13, i32 0, i32 0 + store %Qubit* %target, %Qubit** %14, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %12, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %15 = icmp eq i2 %pauli, 1 + br i1 %15, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %16 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 1 + %21 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, 
i32 2 + %22 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 3 + %23 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %18, i32 0, i32 4 + store %Callable* %16, %Callable** %19, align 8 + store double %tolerance, double* %20, align 8 + store %Array* %coefficients, %Array** %21, align 8 + store i2 -2, i2* %22, align 1 + store { %Array* }* %control, { %Array* }** %23, align 8 + %__qsVar1__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__21__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__16__FunctionTable, %Tuple* %17) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + %24 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @Microsoft__Quantum__Canon___17891ac5051847878c757c806b3b7604_ApplyWithCA__adj(%Callable* %24, %Callable* %__qsVar1__op__, %Qubit* %target) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %25 = icmp eq i2 %pauli, -1 + br i1 %25, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %26 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 1) + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 1 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 2 + %32 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, i32 3 + %33 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %28, i32 0, 
i32 4 + store %Callable* %26, %Callable** %29, align 8 + store double %tolerance, double* %30, align 8 + store %Array* %coefficients, %Array** %31, align 8 + store i2 1, i2* %32, align 1 + store { %Array* }* %control, { %Array* }** %33, align 8 + %__qsVar2__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__22__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__16__FunctionTable, %Tuple* %27) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + %34 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %34) + call void @Microsoft__Quantum__Canon___17891ac5051847878c757c806b3b7604_ApplyWithCA__adj(%Callable* %34, %Callable* %__qsVar2__op__, %Qubit* %target) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %34, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %34, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %35 = icmp eq i2 %pauli, 0 + br i1 %35, label %then3__1, label %else__1 + +then3__1: ; preds = %test3__1 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__adj(double %tolerance, %Array* %coefficients, { %Array* }* %control) + br label %continue__1 + +else__1: ; preds = %test3__1 + %36 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @13, i32 0, i32 0)) + %37 = icmp eq i2 1, %pauli + br i1 %37, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %38 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @14, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %39 = icmp eq i2 -1, %pauli + br i1 %39, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %40 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @15, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %41 = icmp eq i2 -2, %pauli + br i1 %41, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %42 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @16, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %43 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @17, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %44 = phi %String* [ %42, %condTrue__3 ], [ %43, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %45 = phi %String* [ %40, %condTrue__2 ], [ %44, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %46 = phi 
%String* [ %38, %condTrue__1 ], [ %45, %condContinue__2 ] + %47 = call %String* @__quantum__rt__string_concatenate(%String* %36, %String* %46) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + %48 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @3, i32 0, i32 0)) + %49 = call %String* @__quantum__rt__string_concatenate(%String* %47, %String* %48) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %48, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__fail(%String* %49) + unreachable + +continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %pauli = load i2, i2* %3, align 1 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %control = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %control, i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %target = load %Qubit*, %Qubit** %8, align 8 + %9 = icmp eq i2 %pauli, -2 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* 
getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Callable*, double, %Array*, { %Array* }* }* + %13 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 2 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 3 + store %Callable* %10, %Callable** %13, align 8 + store double %tolerance, double* %14, align 8 + store %Array* %coefficients, %Array** %15, align 8 + store { %Array* }* %control, { %Array* }** %16, align 8 + %op = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__23__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__15__FunctionTable, %Tuple* %11) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %17 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %17) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, %Qubit* }* + %20 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %20, align 8 + store %Qubit* %target, %Qubit** %21, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %18, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %22 = icmp eq i2 %pauli, 1 + br i1 %22, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* 
%7, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %26 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 1 + %28 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 2 + %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 3 + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 4 + store %Callable* %23, %Callable** %26, align 8 + store double %tolerance, double* %27, align 8 + store %Array* %coefficients, %Array** %28, align 8 + store i2 -2, i2* %29, align 1 + store { %Array* }* %control, { %Array* }** %30, align 8 + %op__1 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__24__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__16__FunctionTable, %Tuple* %24) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 1) + %31 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { %Callable*, %Callable*, %Qubit* }* + %34 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 2 + store %Callable* %31, %Callable** %34, align 8 + store %Callable* %op__1, %Callable** %35, align 8 + store %Qubit* %target, %Qubit** %36, align 8 + call void @Microsoft__Quantum__Canon___17891ac5051847878c757c806b3b7604_ApplyWithCA__ctl(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %33) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + 
call void @__quantum__rt__capture_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__1, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %37 = icmp eq i2 %pauli, -1 + br i1 %37, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %38 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %41 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 0 + %42 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 1 + %43 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 2 + %44 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 3 + %45 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 4 + store %Callable* %38, %Callable** %41, align 8 + store double %tolerance, double* %42, align 8 + store %Array* %coefficients, %Array** %43, align 8 + store i2 1, i2* %44, align 1 + store { %Array* }* %control, { %Array* }** %45, align 8 + %op__2 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__25__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__16__FunctionTable, %Tuple* %39) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 1) + %46 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %46) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { %Callable*, %Callable*, %Qubit* }* + %49 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 1 + %51 = 
getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 2 + store %Callable* %46, %Callable** %49, align 8 + store %Callable* %op__2, %Callable** %50, align 8 + store %Qubit* %target, %Qubit** %51, align 8 + call void @Microsoft__Quantum__Canon___17891ac5051847878c757c806b3b7604_ApplyWithCA__ctl(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %48) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op__2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %52 = icmp eq i2 %pauli, 0 + br i1 %52, label %then3__1, label %else__1 + +then3__1: ; preds = %test3__1 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %53 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %54 = bitcast %Tuple* %53 to { double, %Array*, { %Array* }* }* + %55 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 0 + %56 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 1 + %57 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 2 + store double %tolerance, double* %55, align 8 + store %Array* %coefficients, %Array** %56, align 8 + store { %Array* }* %control, { %Array* }** %57, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctl(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %54) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1) + br label %continue__1 + +else__1: ; preds = %test3__1 + %58 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @13, i32 0, i32 0)) + %59 = icmp eq i2 1, %pauli + br i1 %59, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %60 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @14, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %61 = icmp eq i2 -1, %pauli + br i1 %61, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %62 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @15, i32 0, 
i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %63 = icmp eq i2 -2, %pauli + br i1 %63, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %64 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @16, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %65 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @17, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %66 = phi %String* [ %64, %condTrue__3 ], [ %65, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %67 = phi %String* [ %62, %condTrue__2 ], [ %66, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %68 = phi %String* [ %60, %condTrue__1 ], [ %67, %condContinue__2 ] + %69 = call %String* @__quantum__rt__string_concatenate(%String* %58, %String* %68) + call void @__quantum__rt__string_update_reference_count(%String* %58, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %68, i32 -1) + %70 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @3, i32 0, i32 0)) + %71 = call %String* @__quantum__rt__string_concatenate(%String* %69, %String* %70) + call void @__quantum__rt__string_update_reference_count(%String* %69, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %70, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__fail(%String* %71) + unreachable + +continue__1: ; preds = %then3__1, %then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 1 + %coefficients = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %3 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 2 + %pauli = load i2, i2* %3, align 1 + %4 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 3 + %control = load { %Array* }*, { %Array* }** %4, align 8 + %5 = getelementptr inbounds { %Array* }, { %Array* }* %control, 
i32 0, i32 0 + %6 = load %Array*, %Array** %5, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 1) + %7 = bitcast { %Array* }* %control to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + %8 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %0, i32 0, i32 4 + %target = load %Qubit*, %Qubit** %8, align 8 + %9 = icmp eq i2 %pauli, -2 + br i1 %9, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexZ__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Callable*, double, %Array*, { %Array* }* }* + %13 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 2 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %12, i32 0, i32 3 + store %Callable* %10, %Callable** %13, align 8 + store double %tolerance, double* %14, align 8 + store %Array* %coefficients, %Array** %15, align 8 + store { %Array* }* %control, { %Array* }** %16, align 8 + %__qsVar0__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__26__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__15__FunctionTable, %Tuple* %11) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %17 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %17) + call void @__quantum__rt__callable_make_controlled(%Callable* %17) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, %Qubit* }* + %20 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %19, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %20, align 8 + store %Qubit* %target, %Qubit** %21, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %18, %Tuple* null) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %17, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %22 = icmp eq i2 %pauli, 1 + br i1 %22, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + %23 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %24 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %25 = bitcast %Tuple* %24 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %26 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 0 + %27 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 1 + %28 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 2 + %29 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 3 + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %25, i32 0, i32 4 + store %Callable* %23, %Callable** %26, align 8 + store double %tolerance, double* %27, align 8 + store %Array* %coefficients, %Array** %28, align 8 + store i2 -2, i2* %29, align 1 + store { %Array* }* %control, { %Array* }** %30, align 8 + %__qsVar1__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__27__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__16__FunctionTable, %Tuple* %24) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 1) + %31 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 1) + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* 
getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { %Callable*, %Callable*, %Qubit* }* + %34 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 1 + %36 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %33, i32 0, i32 2 + store %Callable* %31, %Callable** %34, align 8 + store %Callable* %__qsVar1__op__, %Callable** %35, align 8 + store %Qubit* %target, %Qubit** %36, align 8 + call void @Microsoft__Quantum__Canon___17891ac5051847878c757c806b3b7604_ApplyWithCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %33) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar1__op__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %37 = icmp eq i2 %pauli, -1 + br i1 %37, label %then2__1, label %test3__1 + +then2__1: ; preds = %test2__1 + %38 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, { %Array* }* }* getelementptr ({ %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* null, i32 1) to i64)) + %40 = bitcast %Tuple* %39 to { %Callable*, double, %Array*, i2, { %Array* }* }* + %41 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 0 + %42 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 1 + %43 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 2 + %44 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 3 + %45 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %40, i32 0, i32 4 + store %Callable* %38, %Callable** %41, align 8 + store double %tolerance, double* %42, align 8 + store %Array* %coefficients, %Array** %43, align 8 + store i2 
1, i2* %44, align 1 + store { %Array* }* %control, { %Array* }** %45, align 8 + %__qsVar2__op__ = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__28__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__16__FunctionTable, %Tuple* %39) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 1) + %46 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__S__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__callable_make_adjoint(%Callable* %46) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 1) + %47 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable*, %Qubit* }* getelementptr ({ %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* null, i32 1) to i64)) + %48 = bitcast %Tuple* %47 to { %Callable*, %Callable*, %Qubit* }* + %49 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 0 + %50 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 1 + %51 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %48, i32 0, i32 2 + store %Callable* %46, %Callable** %49, align 8 + store %Callable* %__qsVar2__op__, %Callable** %50, align 8 + store %Qubit* %target, %Qubit** %51, align 8 + call void @Microsoft__Quantum__Canon___17891ac5051847878c757c806b3b7604_ApplyWithCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Callable*, %Qubit* }* %48) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %46, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %__qsVar2__op__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + br label %continue__1 + +test3__1: ; preds = %test2__1 + %52 = icmp eq i2 %pauli, 0 + br i1 %52, label %then3__1, label %else__1 + +then3__1: ; preds = %test3__1 + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 1) + %53 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }* }* getelementptr ({ double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* null, i32 1) to i64)) + %54 = bitcast %Tuple* %53 to { double, %Array*, { %Array* }* }* + %55 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 0 + %56 = getelementptr inbounds { double, 
%Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 1 + %57 = getelementptr inbounds { double, %Array*, { %Array* }* }, { double, %Array*, { %Array* }* }* %54, i32 0, i32 2 + store double %tolerance, double* %55, align 8 + store %Array* %coefficients, %Array** %56, align 8 + store { %Array* }* %control, { %Array* }** %57, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyApplyDiagonalUnitary__ctladj(%Array* %__controlQubits__, { double, %Array*, { %Array* }* }* %54) + call void @__quantum__rt__array_update_reference_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 -1) + br label %continue__1 + +else__1: ; preds = %test3__1 + %58 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([38 x i8], [38 x i8]* @13, i32 0, i32 0)) + %59 = icmp eq i2 1, %pauli + br i1 %59, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %else__1 + %60 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @14, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %else__1 + %61 = icmp eq i2 -1, %pauli + br i1 %61, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condFalse__1 + %62 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @15, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condFalse__1 + %63 = icmp eq i2 -2, %pauli + br i1 %63, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condFalse__2 + %64 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @16, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condFalse__2 + %65 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @17, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %66 = phi %String* [ %64, %condTrue__3 ], [ %65, %condFalse__3 ] + br label %condContinue__2 + +condContinue__2: ; preds = %condContinue__3, %condTrue__2 + %67 = phi %String* [ %62, %condTrue__2 ], [ %66, %condContinue__3 ] + br label %condContinue__1 + +condContinue__1: ; preds = %condContinue__2, %condTrue__1 + %68 = phi %String* [ %60, %condTrue__1 ], [ %67, %condContinue__2 ] + %69 = call %String* @__quantum__rt__string_concatenate(%String* %58, %String* %68) + call void @__quantum__rt__string_update_reference_count(%String* %58, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %68, i32 -1) + %70 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @3, i32 0, i32 0)) + %71 = call %String* @__quantum__rt__string_concatenate(%String* %69, %String* %70) + call void @__quantum__rt__string_update_reference_count(%String* %69, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %70, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + call void @__quantum__rt__fail(%String* %71) + unreachable + +continue__1: ; preds = %then3__1, 
%then2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %6, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__h__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__h__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__20__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, 
%Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__20__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__20__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple 
to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__20__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, 
%Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3 + %11 = load { %Array* }*, { %Array* }** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + store double %7, double* %14, align 8 + store %Array* %9, %Array** %15, align 8 + store { %Array* }* %11, { %Array* }** %16, align 8 + store %Qubit* %4, %Qubit** %17, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* + %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1 + store %Array* %3, %Array** %20, align 8 + store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8 + %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0 + %23 = load %Callable*, %Callable** %22, align 8 + %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %24) + call void @__quantum__rt__callable_make_controlled(%Callable* %24) + call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + 
call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__21__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__21__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* 
%0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__21__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + 
%9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__21__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* 
%arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, 
%Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___17891ac5051847878c757c806b3b7604_ApplyWithCA__adj(%Callable* %outerOperation, %Callable* %innerOperation, %Qubit* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %0 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %0) + call void @__quantum__rt__callable_make_adjoint(%Callable* %0) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Qubit* }* + %3 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %2, i32 0, i32 0 + store %Qubit* %target, %Qubit** %3, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %0, %Tuple* %1, %Tuple* null) + %4 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Qubit* }* + %7 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %6, i32 0, i32 0 + store %Qubit* %target, %Qubit** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %5, %Tuple* null) + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %8) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Qubit* }* + %11 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %10, i32 0, i32 0 + store %Qubit* %target, %Qubit** %11, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %9, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %1, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__22__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__22__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__22__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { 
%Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__22__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = 
getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__23__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, 
i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__23__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3 + %6 = load { %Array* }*, { %Array* }** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0 + %9 = load %Qubit*, %Qubit** %8, align 8 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }* + %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2 + %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3 + store double %2, double* %12, align 8 + store %Array* %4, %Array** %13, align 8 + store { %Array* }* %6, { %Array* }** %14, align 8 + store %Qubit* %9, %Qubit** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__23__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, 
%Array*, { %Array* }* }*
+  %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1
+  %7 = load double, double* %6, align 8
+  %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2
+  %9 = load %Array*, %Array** %8, align 8
+  %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3
+  %11 = load { %Array* }*, { %Array* }** %10, align 8
+  %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }*
+  %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0
+  %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1
+  %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2
+  %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3
+  store double %7, double* %14, align 8
+  store %Array* %9, %Array** %15, align 8
+  store { %Array* }* %11, { %Array* }** %16, align 8
+  store %Qubit* %4, %Qubit** %17, align 8
+  %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64))
+  %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }*
+  %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0
+  %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1
+  store %Array* %3, %Array** %20, align 8
+  store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8
+  %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0
+  %23 = load %Callable*, %Callable** %22, align 8
+  %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %24)
+  call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__23__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }*
+  %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1
+  %3 = load %Array*, %Array** %1, align 8
+  %4 = load %Qubit*, %Qubit** %2, align 8
+  %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }*
+  %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1
+  %7 = load double, double* %6, align 8
+  %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2
+  %9 = load %Array*, %Array** %8, align 8
+  %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3
+  %11 = load { %Array* }*, { %Array* }** %10, align 8
+  %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }*
+  %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0
+  %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1
+  %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2
+  %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3
+  store double %7, double* %14, align 8
+  store %Array* %9, %Array** %15, align 8
+  store { %Array* }* %11, { %Array* }** %16, align 8
+  store %Qubit* %4, %Qubit** %17, align 8
+  %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64))
+  %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }*
+  %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0
+  %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1
+  store %Array* %3, %Array** %20, align 8
+  store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8
+  %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0
+  %23 = load %Callable*, %Callable** %22, align 8
+  %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %24)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %24)
+  call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__24__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }*
+  %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1
+  %2 = load double, double* %1, align 8
+  %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2
+  %4 = load %Array*, %Array** %3, align 8
+  %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3
+  %6 = load i2, i2* %5, align 1
+  %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4
+  %8 = load { %Array* }*, { %Array* }** %7, align 8
+  %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }*
+  %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0
+  %11 = load %Qubit*, %Qubit** %10, align 8
+  %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }*
+  %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0
+  %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1
+  %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2
+  %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3
+  %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4
+  store double %2, double* %14, align 8
+  store %Array* %4, %Array** %15, align 8
+  store i2 %6, i2* %16, align 1
+  store { %Array* }* %8, { %Array* }** %17, align 8
+  store %Qubit* %11, %Qubit** %18, align 8
+  %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0
+  %20 = load %Callable*, %Callable** %19, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__24__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }*
+  %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1
+  %2 = load double, double* %1, align 8
+  %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2
+  %4 = load %Array*, %Array** %3, align 8
+  %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3
+  %6 = load i2, i2* %5, align 1
+  %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4
+  %8 = load { %Array* }*, { %Array* }** %7, align 8
+  %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }*
+  %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0
+  %11 = load %Qubit*, %Qubit** %10, align 8
+  %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }*
+  %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0
+  %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1
+  %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2
+  %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3
+  %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4
+  store double %2, double* %14, align 8
+  store %Array* %4, %Array** %15, align 8
+  store i2 %6, i2* %16, align 1
+  store { %Array* }* %8, { %Array* }** %17, align 8
+  store %Qubit* %11, %Qubit** %18, align 8
+  %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0
+  %20 = load %Callable*, %Callable** %19, align 8
+  %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %21)
+  call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__24__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }*
+  %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1
+  %3 = load %Array*, %Array** %1, align 8
+  %4 = load %Qubit*, %Qubit** %2, align 8
+  %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }*
+  %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1
+  %7 = load double, double* %6, align 8
+  %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2
+  %9 = load %Array*, %Array** %8, align 8
+  %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3
+  %11 = load i2, i2* %10, align 1
+  %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4
+  %13 = load { %Array* }*, { %Array* }** %12, align 8
+  %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }*
+  %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0
+  %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1
+  %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2
+  %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3
+  %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4
+  store double %7, double* %16, align 8
+  store %Array* %9, %Array** %17, align 8
+  store i2 %11, i2* %18, align 1
+  store { %Array* }* %13, { %Array* }** %19, align 8
+  store %Qubit* %4, %Qubit** %20, align 8
+  %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64))
+  %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }*
+  %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0
+  %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1
+  store %Array* %3, %Array** %23, align 8
+  store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8
+  %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0
+  %26 = load %Callable*, %Callable** %25, align 8
+  %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %27)
+  call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__24__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }*
+  %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1
+  %3 = load %Array*, %Array** %1, align 8
+  %4 = load %Qubit*, %Qubit** %2, align 8
+  %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }*
+  %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1
+  %7 = load double, double* %6, align 8
+  %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2
+  %9 = load %Array*, %Array** %8, align 8
+  %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3
+  %11 = load i2, i2* %10, align 1
+  %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4
+  %13 = load { %Array* }*, { %Array* }** %12, align 8
+  %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }*
+  %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0
+  %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1
+  %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2
+  %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3
+  %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4
+  store double %7, double* %16, align 8
+  store %Array* %9, %Array** %17, align 8
+  store i2 %11, i2* %18, align 1
+  store { %Array* }* %13, { %Array* }** %19, align 8
+  store %Qubit* %4, %Qubit** %20, align 8
+  %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64))
+  %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }*
+  %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0
+  %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1
+  store %Array* %3, %Array** %23, align 8
+  store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8
+  %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0
+  %26 = load %Callable*, %Callable** %25, align 8
+  %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %27)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %27)
+  call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1)
+  ret void
+}
+
+define internal void @Microsoft__Quantum__Canon___17891ac5051847878c757c806b3b7604_ApplyWithCA__ctl(%Array* %controlRegister, { %Callable*, %Callable*, %Qubit* }* %0) {
+entry:
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1)
+  %1 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 0
+  %outerOperation = load %Callable*, %Callable** %1, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1)
+  %2 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 1
+  %innerOperation = load %Callable*, %Callable** %2, align 8
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1)
+  %3 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 2
+  %target = load %Qubit*, %Qubit** %3, align 8
+  %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64))
+  %5 = bitcast %Tuple* %4 to { %Qubit* }*
+  %6 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %5, i32 0, i32 0
+  store %Qubit* %target, %Qubit** %6, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %outerOperation, %Tuple* %4, %Tuple* null)
+  %7 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 1)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %7)
+  call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1)
+  %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64))
+  %9 = bitcast %Tuple* %8 to { %Array*, %Qubit* }*
+  %10 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 0
+  %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %9, i32 0, i32 1
+  store %Array* %controlRegister, %Array** %10, align 8
+  store %Qubit* %target, %Qubit** %11, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %7, %Tuple* %8, %Tuple* null)
+  %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %12)
+  %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64))
+  %14 = bitcast %Tuple* %13 to { %Qubit* }*
+  %15 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %14, i32 0, i32 0
+  store %Qubit* %target, %Qubit** %15, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %13, %Tuple* null)
+  call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1)
+  call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1)
+  call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %7, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %7, i32 -1)
+  call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__25__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }*
+  %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1
+  %2 = load double, double* %1, align 8
+  %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2
+  %4 = load %Array*, %Array** %3, align 8
+  %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3
+  %6 = load i2, i2* %5, align 1
+  %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4
+  %8 = load { %Array* }*, { %Array* }** %7, align 8
+  %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }*
+  %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0
+  %11 = load %Qubit*, %Qubit** %10, align 8
+  %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }*
+  %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0
+  %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1
+  %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2
+  %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3
+  %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4
+  store double %2, double* %14, align 8
+  store %Array* %4, %Array** %15, align 8
+  store i2 %6, i2* %16, align 1
+  store { %Array* }* %8, { %Array* }** %17, align 8
+  store %Qubit* %11, %Qubit** %18, align 8
+  %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0
+  %20 = load %Callable*, %Callable** %19, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__25__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }*
+  %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1
+  %2 = load double, double* %1, align 8
+  %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2
+  %4 = load %Array*, %Array** %3, align 8
+  %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3
+  %6 = load i2, i2* %5, align 1
+  %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4
+  %8 = load { %Array* }*, { %Array* }** %7, align 8
+  %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }*
+  %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0
+  %11 = load %Qubit*, %Qubit** %10, align 8
+  %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }*
+  %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0
+  %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1
+  %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2
+  %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3
+  %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4
+  store double %2, double* %14, align 8
+  store %Array* %4, %Array** %15, align 8
+  store i2 %6, i2* %16, align 1
+  store { %Array* }* %8, { %Array* }** %17, align 8
+  store %Qubit* %11, %Qubit** %18, align 8
+  %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0
+  %20 = load %Callable*, %Callable** %19, align 8
+  %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %21)
+  call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__25__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }*
+  %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1
+  %3 = load %Array*, %Array** %1, align 8
+  %4 = load %Qubit*, %Qubit** %2, align 8
+  %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }*
+  %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1
+  %7 = load double, double* %6, align 8
+  %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2
+  %9 = load %Array*, %Array** %8, align 8
+  %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3
+  %11 = load i2, i2* %10, align 1
+  %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4
+  %13 = load { %Array* }*, { %Array* }** %12, align 8
+  %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }*
+  %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0
+  %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1
+  %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2
+  %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3
+  %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4
+  store double %7, double* %16, align 8
+  store %Array* %9, %Array** %17, align 8
+  store i2 %11, i2* %18, align 1
+  store { %Array* }* %13, { %Array* }** %19, align 8
+  store %Qubit* %4, %Qubit** %20, align 8
+  %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64))
+  %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }*
+  %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0
+  %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1
+  store %Array* %3, %Array** %23, align 8
+  store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8
+  %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0
+  %26 = load %Callable*, %Callable** %25, align 8
+  %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %27)
+  call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__25__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }*
+  %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1
+  %3 = load %Array*, %Array** %1, align 8
+  %4 = load %Qubit*, %Qubit** %2, align 8
+  %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }*
+  %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1
+  %7 = load double, double* %6, align 8
+  %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2
+  %9 = load %Array*, %Array** %8, align 8
+  %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3
+  %11 = load i2, i2* %10, align 1
+  %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4
+  %13 = load { %Array* }*, { %Array* }** %12, align 8
+  %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }*
+  %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0
+  %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1
+  %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2
+  %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3
+  %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4
+  store double %7, double* %16, align 8
+  store %Array* %9, %Array** %17, align 8
+  store i2 %11, i2* %18, align 1
+  store { %Array* }* %13, { %Array* }** %19, align 8
+  store %Qubit* %4, %Qubit** %20, align 8
+  %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64))
+  %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }*
+  %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0
+  %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1
+  store %Array* %3, %Array** %23, align 8
+  store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8
+  %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0
+  %26 = load %Callable*, %Callable** %25, align 8
+  %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %27)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %27)
+  call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__26__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }*
+  %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1
+  %2 = load double, double* %1, align 8
+  %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2
+  %4 = load %Array*, %Array** %3, align 8
+  %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3
+  %6 = load { %Array* }*, { %Array* }** %5, align 8
+  %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }*
+  %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0
+  %9 = load %Qubit*, %Qubit** %8, align 8
+  %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }*
+  %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0
+  %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1
+  %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2
+  %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3
+  store double %2, double* %12, align 8
+  store %Array* %4, %Array** %13, align 8
+  store { %Array* }* %6, { %Array* }** %14, align 8
+  store %Qubit* %9, %Qubit** %15, align 8
+  %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0
+  %17 = load %Callable*, %Callable** %16, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %17, %Tuple* %10, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__26__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }*
+  %1 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 1
+  %2 = load double, double* %1, align 8
+  %3 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 2
+  %4 = load %Array*, %Array** %3, align 8
+  %5 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 3
+  %6 = load { %Array* }*, { %Array* }** %5, align 8
+  %7 = bitcast %Tuple* %arg-tuple to { %Qubit* }*
+  %8 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %7, i32 0, i32 0
+  %9 = load %Qubit*, %Qubit** %8, align 8
+  %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %11 = bitcast %Tuple* %10 to { double, %Array*, { %Array* }*, %Qubit* }*
+  %12 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 0
+  %13 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 1
+  %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 2
+  %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %11, i32 0, i32 3
+  store double %2, double* %12, align 8
+  store %Array* %4, %Array** %13, align 8
+  store { %Array* }* %6, { %Array* }** %14, align 8
+  store %Qubit* %9, %Qubit** %15, align 8
+  %16 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %0, i32 0, i32 0
+  %17 = load %Callable*, %Callable** %16, align 8
+  %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %18)
+  call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %10, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__26__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }*
+  %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1
+  %3 = load %Array*, %Array** %1, align 8
+  %4 = load %Qubit*, %Qubit** %2, align 8
+  %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }*
+  %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1
+  %7 = load double, double* %6, align 8
+  %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2
+  %9 = load %Array*, %Array** %8, align 8
+  %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3
+  %11 = load { %Array* }*, { %Array* }** %10, align 8
+  %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }*
+  %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0
+  %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1
+  %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2
+  %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3
+  store double %7, double* %14, align 8
+  store %Array* %9, %Array** %15, align 8
+  store { %Array* }* %11, { %Array* }** %16, align 8
+  store %Qubit* %4, %Qubit** %17, align 8
+  %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64))
+  %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }*
+  %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0
+  %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1
+  store %Array* %3, %Array** %20, align 8
+  store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8
+  %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0
+  %23 = load %Callable*, %Callable** %22, align 8
+  %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %24)
+  call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__26__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }*
+  %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1
+  %3 = load %Array*, %Array** %1, align 8
+  %4 = load %Qubit*, %Qubit** %2, align 8
+  %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, { %Array* }* }*
+  %6 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 1
+  %7 = load double, double* %6, align 8
+  %8 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 2
+  %9 = load %Array*, %Array** %8, align 8
+  %10 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 3
+  %11 = load { %Array* }*, { %Array* }** %10, align 8
+  %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %13 = bitcast %Tuple* %12 to { double, %Array*, { %Array* }*, %Qubit* }*
+  %14 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 0
+  %15 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 1
+  %16 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 2
+  %17 = getelementptr inbounds { double, %Array*, { %Array* }*, %Qubit* }, { double, %Array*, { %Array* }*, %Qubit* }* %13, i32 0, i32 3
+  store double %7, double* %14, align 8
+  store %Array* %9, %Array** %15, align 8
+  store { %Array* }* %11, { %Array* }** %16, align 8
+  store %Qubit* %4, %Qubit** %17, align 8
+  %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* null, i32 1) to i64))
+  %19 = bitcast %Tuple* %18 to { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }*
+  %20 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 0
+  %21 = getelementptr inbounds { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, { %Array* }*, %Qubit* }* }* %19, i32 0, i32 1
+  store %Array* %3, %Array** %20, align 8
+  store { double, %Array*, { %Array* }*, %Qubit* }* %13, { double, %Array*, { %Array* }*, %Qubit* }** %21, align 8
+  %22 = getelementptr inbounds { %Callable*, double, %Array*, { %Array* }* }, { %Callable*, double, %Array*, { %Array* }* }* %5, i32 0, i32 0
+  %23 = load %Callable*, %Callable** %22, align 8
+  %24 = call %Callable* @__quantum__rt__callable_copy(%Callable* %23, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %24)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %24)
+  call void @__quantum__rt__callable_invoke(%Callable* %24, %Tuple* %18, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %24, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %24, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__27__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }*
+  %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1
+  %2 = load double, double* %1, align 8
+  %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2
+  %4 = load %Array*, %Array** %3, align 8
+  %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3
+  %6 = load i2, i2* %5, align 1
+  %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4
+  %8 = load { %Array* }*, { %Array* }** %7, align 8
+  %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }*
+  %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0
+  %11 = load %Qubit*, %Qubit** %10, align 8
+  %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }*
+  %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0
+  %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1
+  %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2
+  %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3
+  %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4
+  store double %2, double* %14, align 8
+  store %Array* %4, %Array** %15, align 8
+  store i2 %6, i2* %16, align 1
+  store { %Array* }* %8, { %Array* }** %17, align 8
+  store %Qubit* %11, %Qubit** %18, align 8
+  %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0
+  %20 = load %Callable*, %Callable** %19, align 8
+  call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__27__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }*
+  %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1
+  %2 = load double, double* %1, align 8
+  %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2
+  %4 = load %Array*, %Array** %3, align 8
+  %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3
+  %6 = load i2, i2* %5, align 1
+  %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4
+  %8 = load { %Array* }*, { %Array* }** %7, align 8
+  %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }*
+  %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0
+  %11 = load %Qubit*, %Qubit** %10, align 8
+  %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }*
+  %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0
+  %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1
+  %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2
+  %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3
+  %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4
+  store double %2, double* %14, align 8
+  store %Array* %4, %Array** %15, align 8
+  store i2 %6, i2* %16, align 1
+  store { %Array* }* %8, { %Array* }** %17, align 8
+  store %Qubit* %11, %Qubit** %18, align 8
+  %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0
+  %20 = load %Callable*, %Callable** %19, align 8
+  %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1)
+  call void @__quantum__rt__callable_make_adjoint(%Callable* %21)
+  call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__27__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }*
+  %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1
+  %3 = load %Array*, %Array** %1, align 8
+  %4 = load %Qubit*, %Qubit** %2, align 8
+  %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }*
+  %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1
+  %7 = load double, double* %6, align 8
+  %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2
+  %9 = load %Array*, %Array** %8, align 8
+  %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3
+  %11 = load i2, i2* %10, align 1
+  %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4
+  %13 = load { %Array* }*, { %Array* }** %12, align 8
+  %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }*
+  %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0
+  %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1
+  %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2
+  %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3
+  %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4
+  store double %7, double* %16, align 8
+  store %Array* %9, %Array** %17, align 8
+  store i2 %11, i2* %18, align 1
+  store { %Array* }* %13, { %Array* }** %19, align 8
+  store %Qubit* %4, %Qubit** %20, align 8
+  %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64))
+  %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }*
+  %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0
+  %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1
+  store %Array* %3, %Array** %23, align 8
+  store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8
+  %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0
+  %26 = load %Callable*, %Callable** %25, align 8
+  %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1)
+  call void @__quantum__rt__callable_make_controlled(%Callable* %27)
+  call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1)
+  call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1)
+  call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1)
+  call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1)
+  ret void
+}
+
+define internal void @Lifted__PartialApplication__27__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) {
+entry:
+  %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }*
+  %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0
+  %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1
+  %3 = load %Array*, %Array** %1, align 8
+  %4 = load %Qubit*, %Qubit** %2, align 8
+  %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }*
+  %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1
+  %7 = load double, double* %6, align 8
+  %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2
+  %9 = load %Array*, %Array** %8, align 8
+  %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3
+  %11 = load i2, i2* %10, align 1
+  %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4
+  %13 = load { %Array* }*, { %Array* }** %12, align 8
+  %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64))
+  %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }*
+  %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0
+  %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1
+  %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2
+  %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3
+  %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4
+  store double %7, double* %16, align 8
+  store %Array* %9, %Array** %17, align 8
+  store i2 %11, i2* %18, align 1
+  store { %Array* }* %13, { %Array* }** %19, align 8
+  store %Qubit* %4, %Qubit** %20, align 8
+  %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64))
+  %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }*
+  %23 = 
getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___17891ac5051847878c757c806b3b7604_ApplyWithCA__ctladj(%Array* %controlRegister, { %Callable*, %Callable*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 1) + %1 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %outerOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %innerOperation = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 1) + %3 = getelementptr inbounds { %Callable*, %Callable*, %Qubit* }, { %Callable*, %Callable*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Qubit* }* + %7 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %6, i32 0, i32 0 + store %Qubit* %target, %Qubit** %7, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %5, %Tuple* null) + %8 = call %Callable* @__quantum__rt__callable_copy(%Callable* %innerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %8) + call void 
@__quantum__rt__callable_make_adjoint(%Callable* %8) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 1) + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %10 = bitcast %Tuple* %9 to { %Array*, %Qubit* }* + %11 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %10, i32 0, i32 0 + %12 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %10, i32 0, i32 1 + store %Array* %controlRegister, %Array** %11, align 8 + store %Qubit* %target, %Qubit** %12, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %8, %Tuple* %9, %Tuple* null) + %13 = call %Callable* @__quantum__rt__callable_copy(%Callable* %outerOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %13) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { %Qubit* }* + %16 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %15, i32 0, i32 0 + store %Qubit* %target, %Qubit** %16, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %13, %Tuple* %14, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %outerOperation, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %innerOperation, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %controlRegister, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__28__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, 
double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__28__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 4 + %8 = load { %Array* }*, { %Array* }** %7, align 8 + %9 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %10 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %9, i32 0, i32 0 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %14 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, 
i32 0 + %15 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 2 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 3 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %13, i32 0, i32 4 + store double %2, double* %14, align 8 + store %Array* %4, %Array** %15, align 8 + store i2 %6, i2* %16, align 1 + store { %Array* }* %8, { %Array* }** %17, align 8 + store %Qubit* %11, %Qubit** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %0, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__28__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds 
{ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__28__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, { %Array* }* }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, 
%Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 4 + %13 = load { %Array* }*, { %Array* }** %12, align 8 + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %16 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 2 + %19 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 3 + %20 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, i32 0, i32 4 + store double %7, double* %16, align 8 + store %Array* %9, %Array** %17, align 8 + store i2 %11, i2* %18, align 1 + store { %Array* }* %13, { %Array* }** %19, align 8 + store %Qubit* %4, %Qubit** %20, align 8 + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* + %23 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }, { %Array*, { double, %Array*, i2, { %Array* }*, %Qubit* }* }* %22, i32 0, i32 1 + store %Array* %3, %Array** %23, align 8 + store { double, %Array*, i2, { %Array* }*, %Qubit* }* %15, { double, %Array*, i2, { %Array* }*, %Qubit* }** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, { %Array* }* }, { %Callable*, double, %Array*, i2, { %Array* }* }* %5, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_make_controlled(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %21, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { +entry: + %__controlQubits__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__controlQubits__, i64 0) + %1 = bitcast i8* %0 to %Qubit** + 
store %Qubit* %control, %Qubit** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) + ret void +} + +declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) + +define internal i1 @Microsoft__Quantum__Canon__IsRangeEmpty__body(%Range %rng) { +entry: + %0 = extractvalue %Range %rng, 0 + %1 = extractvalue %Range %rng, 1 + %2 = extractvalue %Range %rng, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %3 = icmp sgt i64 %1, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idx = phi i64 [ %0, %preheader__1 ], [ %7, %exiting__1 ] + %4 = icmp sle i64 %idx, %2 + %5 = icmp sge i64 %idx, %2 + %6 = select i1 %3, i1 %4, i1 %5 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + ret i1 false + +exiting__1: ; No predecessors! + %7 = add i64 %idx, %1 + br label %header__1 + +exit__1: ; preds = %header__1 + ret i1 true +} + +define internal void @Lifted__PartialApplication__29__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, { %Array* }* }* + %5 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %9, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__29__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array*, { %Array* }* }* + %5 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %4, i32 0, i32 1 + store %Array* %2, %Array** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Array* 
}* + store { %Array* }* %7, { %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Callable* @__quantum__rt__callable_copy(%Callable* %9, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %10) + call void @__quantum__rt__callable_invoke(%Callable* %10, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__29__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, { %Array* }* }* + %10 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store { %Array* }* %4, { %Array* }** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, { %Array* }* }* }* getelementptr ({ %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, { %Array* }* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, { %Array* }* }* %9, { %Array*, { %Array* }* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void 
@Lifted__PartialApplication__29__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, { %Array* }* }* + %10 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store { %Array* }* %4, { %Array* }** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, { %Array* }* }* }* getelementptr ({ %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, { %Array* }* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, { %Array* }* }* %9, { %Array*, { %Array* }* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___64e06f1afcfe4ab9be6fe951b3a50440___QsRef1__ApplyBoundCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___64e06f1afcfe4ab9be6fe951b3a50440___QsRef1__ApplyBoundCA____body(%Array* %3, { %Array* }* %4) + ret void +} + +define internal void 
@Microsoft__Quantum__Canon___64e06f1afcfe4ab9be6fe951b3a50440___QsRef1__ApplyBoundCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___64e06f1afcfe4ab9be6fe951b3a50440___QsRef1__ApplyBoundCA____adj(%Array* %3, { %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___64e06f1afcfe4ab9be6fe951b3a50440___QsRef1__ApplyBoundCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, { %Array* }* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, { %Array* }* }*, { %Array*, { %Array* }* }** %2, align 8 + call void @Microsoft__Quantum__Canon___64e06f1afcfe4ab9be6fe951b3a50440___QsRef1__ApplyBoundCA____ctl(%Array* %3, { %Array*, { %Array* }* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___64e06f1afcfe4ab9be6fe951b3a50440___QsRef1__ApplyBoundCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, { %Array* }* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, { %Array* }* }* }, { %Array*, { %Array*, { %Array* }* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, { %Array* }* }*, { %Array*, { %Array* }* }** %2, align 8 + call void @Microsoft__Quantum__Canon___64e06f1afcfe4ab9be6fe951b3a50440___QsRef1__ApplyBoundCA____ctladj(%Array* %3, { %Array*, { %Array* }* }* %4) + ret void +} + +define internal void @MemoryManagement__17__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Callable** + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 %count-change) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__17__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Callable** + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %11, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___64e06f1afcfe4ab9be6fe951b3a50440___QsRef1__ApplyBoundCA____body(%Array* %operations, { %Array* }* %target) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %8 = getelementptr inbounds { %Array* }, { %Array* }* %target, i32 0, i32 0 + %9 = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 1) + %10 = bitcast { %Array* }* %target to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + %11 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %16, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 
%13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %12) + %15 = bitcast i8* %14 to %Callable** + %op = load %Callable*, %Callable** %15, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %10, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %16 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %17 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %18 = phi i64 [ 0, %exit__2 ], [ %23, %exiting__3 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %18) + %21 = bitcast i8* %20 to %Callable** + %22 = load %Callable*, %Callable** %21, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %22, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %23 = add i64 %18, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___64e06f1afcfe4ab9be6fe951b3a50440___QsRef1__ApplyBoundCA____adj(%Array* %operations, { %Array* }* %target) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %8 = getelementptr inbounds { %Array* }, { %Array* }* %target, i32 0, i32 0 + %9 = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 1) + %10 = bitcast { %Array* }* %target to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + %11 = sub i64 %0, 1 + %12 = insertvalue %Range zeroinitializer, i64 %11, 0 + %13 = insertvalue %Range %12, i64 -1, 1 + %14 = insertvalue %Range %13, i64 0, 2 + %15 = call %Array* @__quantum__rt__array_slice_1d(%Array* %operations, %Range %14, i1 true) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %15) + %17 = sub i64 %16, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %18 = phi i64 [ 0, %exit__1 ], [ %23, %exiting__2 ] + %19 = 
icmp sle i64 %18, %17 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %18) + %21 = bitcast i8* %20 to %Callable** + %__qsVar0__op__ = load %Callable*, %Callable** %21, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %10, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %23 = add i64 %18, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %24 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %25 = phi i64 [ 0, %exit__2 ], [ %30, %exiting__3 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %25) + %28 = bitcast i8* %27 to %Callable** + %29 = load %Callable*, %Callable** %28, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %29, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %30 = add i64 %25, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___64e06f1afcfe4ab9be6fe951b3a50440___QsRef1__ApplyBoundCA____ctl(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %operations = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %4) + %7 = bitcast i8* %6 to %Callable** + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void 
@__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %10 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %target = load { %Array* }*, { %Array* }** %10, align 8 + %11 = getelementptr inbounds { %Array* }, { %Array* }* %target, i32 0, i32 0 + %12 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = bitcast { %Array* }* %target to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %13, i32 1) + %14 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %25, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %15) + %18 = bitcast i8* %17 to %Callable** + %op = load %Callable*, %Callable** %18, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %19 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %19) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %20 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 1) + %21 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %22 = bitcast %Tuple* %21 to { %Array*, { %Array* }* }* + %23 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %22, i32 0, i32 0 + %24 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %22, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %23, align 8 + store { %Array* }* %target, { %Array* }** %24, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %19, %Tuple* %21, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %25 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %26 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %27 = phi i64 [ 0, %exit__2 ], [ %32, %exiting__3 ] + %28 = icmp sle i64 %27, %26 + br i1 %28, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %27) + %30 = bitcast i8* %29 to %Callable** 
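+ ; body__3 reloads each operation callable and releases the capture/callable alias counts acquired in body__1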
+ %31 = load %Callable*, %Callable** %30, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %31, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %32 = add i64 %27, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + %33 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %13, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___64e06f1afcfe4ab9be6fe951b3a50440___QsRef1__ApplyBoundCA____ctladj(%Array* %__controlQubits__, { %Array*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %operations = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %4) + %7 = bitcast i8* %6 to %Callable** + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %10 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %target = load { %Array* }*, { %Array* }** %10, align 8 + %11 = getelementptr inbounds { %Array* }, { %Array* }* %target, i32 0, i32 0 + %12 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = bitcast { %Array* }* %target to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %13, i32 1) + %14 = sub i64 %2, 1 + %15 = insertvalue %Range zeroinitializer, i64 %14, 0 + %16 = insertvalue %Range %15, i64 -1, 1 + %17 = insertvalue %Range %16, i64 0, 2 + %18 = call %Array* @__quantum__rt__array_slice_1d(%Array* %operations, %Range %17, i1 true) + %19 = call i64 @__quantum__rt__array_get_size_1d(%Array* %18) + %20 = sub i64 %19, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %21 = phi i64 [ 0, %exit__1 ], [ %31, %exiting__2 ] + %22 = icmp sle i64 %21, %20 + br i1 %22, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %21) + %24 = bitcast i8* %23 to %Callable** + %__qsVar0__op__ = load %Callable*, %Callable** %24, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %25 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %25, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* 
%25) + call void @__quantum__rt__callable_make_controlled(%Callable* %25) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %26 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %26, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 1) + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array* }* }* getelementptr ({ %Array*, { %Array* }* }, { %Array*, { %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, { %Array* }* }* + %29 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %28, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %29, align 8 + store { %Array* }* %target, { %Array* }** %30, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %25, %Tuple* %27, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %25, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %25, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %26, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %31 = add i64 %21, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %32 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %33 = phi i64 [ 0, %exit__2 ], [ %38, %exiting__3 ] + %34 = icmp sle i64 %33, %32 + br i1 %34, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %33) + %36 = bitcast i8* %35 to %Callable** + %37 = load %Callable*, %Callable** %36, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %37, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %37, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %38 = add i64 %33, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + %39 = load %Array*, %Array** %11, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %39, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %13, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %18, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___7af5478fdcef46609b03bed279a41c2b_BoundCA__body(%Array* %operations) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %2) + %5 = bitcast i8* 
%4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %8 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___71f14e6f07364d01825c873fe6a150ab___QsRef1__ApplyBoundCA____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %9 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %10 = phi i64 [ 0, %exit__1 ], [ %15, %exiting__2 ] + %11 = icmp sle i64 %10, %9 + br i1 %11, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %10) + %13 = bitcast i8* %12 to %Callable** + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %14, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %15 = add i64 %10, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %operations, i32 1) + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Callable*, %Array* }* + %18 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %17, i32 0, i32 1 + store %Callable* %8, %Callable** %18, align 8 + store %Array* %operations, %Array** %19, align 8 + %20 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__30__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__18__FunctionTable, %Tuple* %16) + %21 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %22 = phi i64 [ 0, %exit__2 ], [ %27, %exiting__3 ] + %23 = icmp sle i64 %22, %21 + br i1 %23, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %22) + %25 = bitcast i8* %24 to %Callable** + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %26, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %26, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %27 = add i64 %22, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + ret %Callable* %20 +} + +define internal void @Lifted__PartialApplication__30__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load 
%Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__30__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %2 = load %Array*, %Array** %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Array* }* + %4 = getelementptr inbounds { %Array* }, { %Array* }* %3, i32 0, i32 0 + %5 = load %Array*, %Array** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { %Array*, %Array* }* + %8 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %7, i32 0, i32 1 + store %Array* %2, %Array** %8, align 8 + store %Array* %5, %Array** %9, align 8 + %10 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__30__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, 
i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__30__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %6 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 1 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Array*, %Array* }* + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %9, i32 0, i32 1 + store %Array* %7, %Array** %10, align 8 + store %Array* %4, %Array** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Array*, %Array* }* }* getelementptr ({ %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Array*, %Array* }* }* + %14 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Array*, %Array* }* %9, { %Array*, %Array* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** 
%16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___71f14e6f07364d01825c873fe6a150ab___QsRef1__ApplyBoundCA____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Canon___71f14e6f07364d01825c873fe6a150ab___QsRef1__ApplyBoundCA____body(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___71f14e6f07364d01825c873fe6a150ab___QsRef1__ApplyBoundCA____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + call void @Microsoft__Quantum__Canon___71f14e6f07364d01825c873fe6a150ab___QsRef1__ApplyBoundCA____adj(%Array* %3, %Array* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___71f14e6f07364d01825c873fe6a150ab___QsRef1__ApplyBoundCA____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Canon___71f14e6f07364d01825c873fe6a150ab___QsRef1__ApplyBoundCA____ctl(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon___71f14e6f07364d01825c873fe6a150ab___QsRef1__ApplyBoundCA____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array*, %Array* }* }, { %Array*, { %Array*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array*, %Array* }*, { %Array*, %Array* }** %2, align 8 + call void 
@Microsoft__Quantum__Canon___71f14e6f07364d01825c873fe6a150ab___QsRef1__ApplyBoundCA____ctladj(%Array* %3, { %Array*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__18__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Callable** + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__18__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Array* }* + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %4 = load %Array*, %Array** %3, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %7) + %10 = bitcast i8* %9 to %Callable** + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %11, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %11, i32 %count-change) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___71f14e6f07364d01825c873fe6a150ab___QsRef1__ApplyBoundCA____body(%Array* %operations, %Array* 
%target) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %8 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %9 = phi i64 [ 0, %exit__1 ], [ %16, %exiting__2 ] + %10 = icmp sle i64 %9, %8 + br i1 %10, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %9) + %12 = bitcast i8* %11 to %Callable** + %op = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %13 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %14 = bitcast %Tuple* %13 to { %Array* }* + %15 = getelementptr inbounds { %Array* }, { %Array* }* %14, i32 0, i32 0 + store %Array* %target, %Array** %15, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %13, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %16 = add i64 %9, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %17 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %18 = phi i64 [ 0, %exit__2 ], [ %23, %exiting__3 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %18) + %21 = bitcast i8* %20 to %Callable** + %22 = load %Callable*, %Callable** %21, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %22, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %23 = add i64 %18, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___71f14e6f07364d01825c873fe6a150ab___QsRef1__ApplyBoundCA____adj(%Array* %operations, %Array* %target) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %7, 
%exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %2) + %5 = bitcast i8* %4 to %Callable** + %6 = load %Callable*, %Callable** %5, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %8 = sub i64 %0, 1 + %9 = insertvalue %Range zeroinitializer, i64 %8, 0 + %10 = insertvalue %Range %9, i64 -1, 1 + %11 = insertvalue %Range %10, i64 0, 2 + %12 = call %Array* @__quantum__rt__array_slice_1d(%Array* %operations, %Range %11, i1 true) + %13 = call i64 @__quantum__rt__array_get_size_1d(%Array* %12) + %14 = sub i64 %13, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %15 = phi i64 [ 0, %exit__1 ], [ %23, %exiting__2 ] + %16 = icmp sle i64 %15, %14 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %12, i64 %15) + %18 = bitcast i8* %17 to %Callable** + %__qsVar0__op__ = load %Callable*, %Callable** %18, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %19 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %19) + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { %Array* }* + %22 = getelementptr inbounds { %Array* }, { %Array* }* %21, i32 0, i32 0 + store %Array* %target, %Array** %22, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %19, %Tuple* %20, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %23 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %24 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %25 = phi i64 [ 0, %exit__2 ], [ %30, %exiting__3 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %25) + %28 = bitcast i8* %27 to %Callable** + %29 = load %Callable*, %Callable** %28, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %29, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %29, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %30 = add i64 %25, 1 + br label %header__3 + 
+exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___71f14e6f07364d01825c873fe6a150ab___QsRef1__ApplyBoundCA____ctl(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %operations = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %4) + %7 = bitcast i8* %6 to %Callable** + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %11 = sub i64 %2, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %12) + %15 = bitcast i8* %14 to %Callable** + %op = load %Callable*, %Callable** %15, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %16 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %16, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %16) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, %Array* }* + %19 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %18, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %19, align 8 + store %Array* %target, %Array** %20, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %16, %Tuple* %17, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %16, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %16, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %22 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %23 = phi i64 [ 0, %exit__2 ], [ %28, %exiting__3 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %23) + %26 = bitcast i8* %25 to %Callable** + %27 = load %Callable*, %Callable** %26, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %27, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %28 = add i64 %23, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___71f14e6f07364d01825c873fe6a150ab___QsRef1__ApplyBoundCA____ctladj(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %operations = load %Array*, %Array** %1, align 8 + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %operations) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %4) + %7 = bitcast i8* %6 to %Callable** + %8 = load %Callable*, %Callable** %7, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %8, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 1) + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %11 = sub i64 %2, 1 + %12 = insertvalue %Range zeroinitializer, i64 %11, 0 + %13 = insertvalue %Range %12, i64 -1, 1 + %14 = insertvalue %Range %13, i64 0, 2 + %15 = call %Array* @__quantum__rt__array_slice_1d(%Array* %operations, %Range %14, i1 true) + %16 = call i64 @__quantum__rt__array_get_size_1d(%Array* %15) + %17 = sub i64 %16, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %18 = phi i64 [ 0, %exit__1 ], [ %27, %exiting__2 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__2, label %exit__2 + 
+body__2: ; preds = %header__2 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %18) + %21 = bitcast i8* %20 to %Callable** + %__qsVar0__op__ = load %Callable*, %Callable** %21, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 1) + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %__qsVar0__op__, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Array*, %Array* }* + %25 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 0 + %26 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %24, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %25, align 8 + store %Array* %target, %Array** %26, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %23, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %__qsVar0__op__, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %27 = add i64 %18, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + %28 = sub i64 %2, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %29 = phi i64 [ 0, %exit__2 ], [ %34, %exiting__3 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %operations, i64 %29) + %32 = bitcast i8* %31 to %Callable** + %33 = load %Callable*, %Callable** %32, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %33, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %34 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %operations, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + ret void +} + +define internal %Callable* @Microsoft__Quantum__Canon___89057b82e6d84103ba7c024aa441bb1f_CControlledCA__body(%Callable* %op) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon___8858b7f5bb16452095bbf2ddd9326e17_ApplyIfCA__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %op, i32 1) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Callable* }* getelementptr ({ %Callable*, %Callable* }, { %Callable*, %Callable* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Callable*, %Callable* }* + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %2, i32 0, i32 0 + %4 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %2, i32 0, i32 1 + store %Callable* %0, %Callable** %3, align 8 + store %Callable* %op, %Callable** %4, align 8 + %5 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__31__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__19__FunctionTable, %Tuple* %1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret %Callable* %5 +} + +define internal void @Lifted__PartialApplication__31__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i1, %Qubit* }* + %1 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %0, i32 0, i32 0 + %2 = load i1, i1* %1, align 1 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %4 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %3, i32 0, i32 1 + %5 = load %Callable*, %Callable** %4, align 8 + %6 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %0, i32 0, i32 1 + %7 = load %Qubit*, %Qubit** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, %Callable*, %Qubit* }* getelementptr ({ i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { i1, %Callable*, %Qubit* }* + %10 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %9, i32 0, i32 2 + store i1 %2, i1* %10, align 1 + store %Callable* %5, %Callable** %11, align 8 + store %Qubit* %7, %Qubit** %12, align 8 + %13 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %3, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__31__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i1, %Qubit* }* + %1 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %0, i32 0, i32 0 + %2 = load i1, i1* %1, align 1 + %3 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %4 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* 
%3, i32 0, i32 1 + %5 = load %Callable*, %Callable** %4, align 8 + %6 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %0, i32 0, i32 1 + %7 = load %Qubit*, %Qubit** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, %Callable*, %Qubit* }* getelementptr ({ i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { i1, %Callable*, %Qubit* }* + %10 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %9, i32 0, i32 2 + store i1 %2, i1* %10, align 1 + store %Callable* %5, %Callable** %11, align 8 + store %Qubit* %7, %Qubit** %12, align 8 + %13 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %3, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__31__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i1, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { i1, %Qubit* }* }, { %Array*, { i1, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i1, %Qubit* }* }, { %Array*, { i1, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i1, %Qubit* }*, { i1, %Qubit* }** %2, align 8 + %5 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %4, i32 0, i32 0 + %6 = load i1, i1* %5, align 1 + %7 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %8 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %7, i32 0, i32 1 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %4, i32 0, i32 1 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, %Callable*, %Qubit* }* getelementptr ({ i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i1, %Callable*, %Qubit* }* + %14 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %13, i32 0, i32 2 + store i1 %6, i1* %14, align 1 + store %Callable* %9, %Callable** %15, align 8 + store %Qubit* %11, %Qubit** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i1, %Callable*, %Qubit* }* }* getelementptr ({ %Array*, { i1, %Callable*, %Qubit* }* }, { %Array*, { i1, %Callable*, %Qubit* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to 
{ %Array*, { i1, %Callable*, %Qubit* }* }* + %19 = getelementptr inbounds { %Array*, { i1, %Callable*, %Qubit* }* }, { %Array*, { i1, %Callable*, %Qubit* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { i1, %Callable*, %Qubit* }* }, { %Array*, { i1, %Callable*, %Qubit* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, align 8 + store { i1, %Callable*, %Qubit* }* %13, { i1, %Callable*, %Qubit* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %7, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__31__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i1, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { i1, %Qubit* }* }, { %Array*, { i1, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i1, %Qubit* }* }, { %Array*, { i1, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i1, %Qubit* }*, { i1, %Qubit* }** %2, align 8 + %5 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %4, i32 0, i32 0 + %6 = load i1, i1* %5, align 1 + %7 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %8 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %7, i32 0, i32 1 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %4, i32 0, i32 1 + %11 = load %Qubit*, %Qubit** %10, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, %Callable*, %Qubit* }* getelementptr ({ i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i1, %Callable*, %Qubit* }* + %14 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %13, i32 0, i32 1 + %16 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %13, i32 0, i32 2 + store i1 %6, i1* %14, align 1 + store %Callable* %9, %Callable** %15, align 8 + store %Qubit* %11, %Qubit** %16, align 8 + %17 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i1, %Callable*, %Qubit* }* }* getelementptr ({ %Array*, { i1, %Callable*, %Qubit* }* }, { %Array*, { i1, %Callable*, %Qubit* }* }* null, i32 1) to i64)) + %18 = bitcast %Tuple* %17 to { %Array*, { i1, %Callable*, %Qubit* }* }* + %19 = getelementptr inbounds { %Array*, { i1, %Callable*, %Qubit* }* }, { %Array*, { i1, %Callable*, %Qubit* }* }* %18, i32 0, i32 0 + %20 = getelementptr inbounds { %Array*, { i1, %Callable*, %Qubit* }* }, { %Array*, { i1, %Callable*, %Qubit* }* }* %18, i32 0, i32 1 + store %Array* %3, %Array** %19, 
align 8 + store { i1, %Callable*, %Qubit* }* %13, { i1, %Callable*, %Qubit* }** %20, align 8 + %21 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %7, i32 0, i32 0 + %22 = load %Callable*, %Callable** %21, align 8 + %23 = call %Callable* @__quantum__rt__callable_copy(%Callable* %22, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %23) + call void @__quantum__rt__callable_make_controlled(%Callable* %23) + call void @__quantum__rt__callable_invoke(%Callable* %23, %Tuple* %17, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %17, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %23, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %23, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8858b7f5bb16452095bbf2ddd9326e17_ApplyIfCA__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i1, %Callable*, %Qubit* }* + %1 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %0, i32 0, i32 2 + %4 = load i1, i1* %1, align 1 + %5 = load %Callable*, %Callable** %2, align 8 + %6 = load %Qubit*, %Qubit** %3, align 8 + call void @Microsoft__Quantum__Canon___8858b7f5bb16452095bbf2ddd9326e17_ApplyIfCA__body(i1 %4, %Callable* %5, %Qubit* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8858b7f5bb16452095bbf2ddd9326e17_ApplyIfCA__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i1, %Callable*, %Qubit* }* + %1 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %0, i32 0, i32 2 + %4 = load i1, i1* %1, align 1 + %5 = load %Callable*, %Callable** %2, align 8 + %6 = load %Qubit*, %Qubit** %3, align 8 + call void @Microsoft__Quantum__Canon___8858b7f5bb16452095bbf2ddd9326e17_ApplyIfCA__adj(i1 %4, %Callable* %5, %Qubit* %6) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8858b7f5bb16452095bbf2ddd9326e17_ApplyIfCA__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i1, %Callable*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { i1, %Callable*, %Qubit* }* }, { %Array*, { i1, %Callable*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i1, %Callable*, %Qubit* }* }, { %Array*, { i1, %Callable*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i1, %Callable*, %Qubit* }*, { i1, %Callable*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon___8858b7f5bb16452095bbf2ddd9326e17_ApplyIfCA__ctl(%Array* %3, { i1, %Callable*, %Qubit* }* %4) + ret void +} + +define internal void 
@Microsoft__Quantum__Canon___8858b7f5bb16452095bbf2ddd9326e17_ApplyIfCA__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i1, %Callable*, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { i1, %Callable*, %Qubit* }* }, { %Array*, { i1, %Callable*, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i1, %Callable*, %Qubit* }* }, { %Array*, { i1, %Callable*, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i1, %Callable*, %Qubit* }*, { i1, %Callable*, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon___8858b7f5bb16452095bbf2ddd9326e17_ApplyIfCA__ctladj(%Array* %3, { i1, %Callable*, %Qubit* }* %4) + ret void +} + +define internal void @MemoryManagement__19__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__19__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8858b7f5bb16452095bbf2ddd9326e17_ApplyIfCA__body(i1 %bit, %Callable* %op, %Qubit* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + br i1 %bit, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { %Qubit* }* + %2 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %1, i32 0, i32 0 + store %Qubit* %target, %Qubit** %2, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %op, %Tuple* %0, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, 
i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8858b7f5bb16452095bbf2ddd9326e17_ApplyIfCA__adj(i1 %bit, %Callable* %op, %Qubit* %target) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + br i1 %bit, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %0 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %0) + %1 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %2 = bitcast %Tuple* %1 to { %Qubit* }* + %3 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %2, i32 0, i32 0 + store %Qubit* %target, %Qubit** %3, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %0, %Tuple* %1, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %1, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8858b7f5bb16452095bbf2ddd9326e17_ApplyIfCA__ctl(%Array* %__controlQubits__, { i1, %Callable*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %bit = load i1, i1* %1, align 1 + %2 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + br i1 %bit, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %4 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %4) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Array*, %Qubit* }* + %7 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %6, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %7, align 8 + store %Qubit* %target, %Qubit** %8, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %4, 
%Tuple* %5, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8858b7f5bb16452095bbf2ddd9326e17_ApplyIfCA__ctladj(%Array* %__controlQubits__, { i1, %Callable*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %0, i32 0, i32 0 + %bit = load i1, i1* %1, align 1 + %2 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %0, i32 0, i32 1 + %op = load %Callable*, %Callable** %2, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 1) + %3 = getelementptr inbounds { i1, %Callable*, %Qubit* }, { i1, %Callable*, %Qubit* }* %0, i32 0, i32 2 + %target = load %Qubit*, %Qubit** %3, align 8 + br i1 %bit, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %4 = call %Callable* @__quantum__rt__callable_copy(%Callable* %op, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %4) + call void @__quantum__rt__callable_make_controlled(%Callable* %4) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Array*, %Qubit* }* + %7 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %6, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %7, align 8 + store %Qubit* %target, %Qubit** %8, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %4, %Tuple* %5, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %op, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %op, i32 -1) + ret void +} + +define internal i1 @Microsoft__Quantum__Canon___bfd0dc2872b54301bd24b64a2c23e89e_Snd__body(i64 %0, i1 %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i1 }* getelementptr ({ i64, i1 }, { i64, i1 }* null, i32 1) to i64)) + %pair = bitcast 
%Tuple* %2 to { i64, i1 }* + %3 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %pair, i32 0, i32 0 + %4 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %pair, i32 0, i32 1 + store i64 %0, i64* %3, align 4 + store i1 %1, i1* %4, align 1 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret i1 %1 +} + +define internal double @Microsoft__Quantum__Canon___57aa5a97323949708d5d87cf2aa9c443_Snd__body(double %0, double %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %pair = bitcast %Tuple* %2 to { double, double }* + %3 = getelementptr inbounds { double, double }, { double, double }* %pair, i32 0, i32 0 + %4 = getelementptr inbounds { double, double }, { double, double }* %pair, i32 0, i32 1 + store double %0, double* %3, align 8 + store double %1, double* %4, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret double %1 +} + +define internal i64 @Microsoft__Quantum__Canon___facc0657b0284c16ae2c0d999b143be0_Fst__body(i64 %0, i1 %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i1 }* getelementptr ({ i64, i1 }, { i64, i1 }* null, i32 1) to i64)) + %pair = bitcast %Tuple* %2 to { i64, i1 }* + %3 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %pair, i32 0, i32 0 + %4 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %pair, i32 0, i32 1 + store i64 %0, i64* %3, align 4 + store i1 %1, i1* %4, align 1 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret i64 %0 +} + +define internal double @Microsoft__Quantum__Canon___17f71f9fa0da472d90e5bc66ed171ba5_Fst__body(double %0, double %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %pair = bitcast %Tuple* %2 to { double, double }* + %3 = getelementptr inbounds { double, double }, { double, double }* %pair, i32 0, i32 0 + %4 = getelementptr inbounds { double, double }, { double, double }* %pair, i32 0, i32 1 + store double %0, double* %3, align 8 + store double %1, double* %4, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %2, i32 -1) + ret double %0 +} + +define internal void @Microsoft__Quantum__Canon___19e349a447284c0584da8df665449ea7_ApplyToEachCA__body(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %register) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label 
%exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %2) + %5 = bitcast i8* %4 to { i1, %Qubit* }** + %6 = load { i1, %Qubit* }*, { i1, %Qubit* }** %5, align 8 + %7 = bitcast { i1, %Qubit* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %9 = call %Range @Microsoft__Quantum__Arrays___f1cf675f218b4a7fae00ea122aa1825f_IndexRange__body(%Array* %register) + %10 = extractvalue %Range %9, 0 + %11 = extractvalue %Range %9, 1 + %12 = extractvalue %Range %9, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %13 = icmp sgt i64 %11, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idxQubit = phi i64 [ %10, %preheader__1 ], [ %21, %exiting__2 ] + %14 = icmp sle i64 %idxQubit, %12 + %15 = icmp sge i64 %idxQubit, %12 + %16 = select i1 %13, i1 %14, i1 %15 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %18 = bitcast i8* %17 to { i1, %Qubit* }** + %19 = load { i1, %Qubit* }*, { i1, %Qubit* }** %18, align 8 + %20 = bitcast { i1, %Qubit* }* %19 to %Tuple* + call void @__quantum__rt__callable_invoke(%Callable* %singleElementOperation, %Tuple* %20, %Tuple* null) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %idxQubit, %11 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + %22 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %23 = phi i64 [ 0, %exit__2 ], [ %29, %exiting__3 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %23) + %26 = bitcast i8* %25 to { i1, %Qubit* }** + %27 = load { i1, %Qubit* }*, { i1, %Qubit* }** %26, align 8 + %28 = bitcast { i1, %Qubit* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %29 = add i64 %23, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal %Range @Microsoft__Quantum__Arrays___f1cf675f218b4a7fae00ea122aa1825f_IndexRange__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { i1, %Qubit* }** + %6 = load { i1, %Qubit* }*, { i1, %Qubit* }** %5, align 8 + %7 = bitcast { i1, %Qubit* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = 
%header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %9 = sub i64 %0, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %11 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %12) + %15 = bitcast i8* %14 to { i1, %Qubit* }** + %16 = load { i1, %Qubit* }*, { i1, %Qubit* }** %15, align 8 + %17 = bitcast { i1, %Qubit* }* %16 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %18 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %10 +} + +define internal void @Microsoft__Quantum__Canon___19e349a447284c0584da8df665449ea7_ApplyToEachCA__adj(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %register) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %2) + %5 = bitcast i8* %4 to { i1, %Qubit* }** + %6 = load { i1, %Qubit* }*, { i1, %Qubit* }** %5, align 8 + %7 = bitcast { i1, %Qubit* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %9 = call %Range @Microsoft__Quantum__Arrays___f1cf675f218b4a7fae00ea122aa1825f_IndexRange__body(%Array* %register) + %10 = extractvalue %Range %9, 0 + %11 = extractvalue %Range %9, 1 + %12 = extractvalue %Range %9, 2 + %13 = sub i64 %12, %10 + %14 = sdiv i64 %13, %11 + %15 = mul i64 %11, %14 + %16 = add i64 %10, %15 + %17 = sub i64 0, %11 + %18 = insertvalue %Range zeroinitializer, i64 %16, 0 + %19 = insertvalue %Range %18, i64 %17, 1 + %20 = insertvalue %Range %19, i64 %10, 2 + %21 = extractvalue %Range %20, 0 + %22 = extractvalue %Range %20, 1 + %23 = extractvalue %Range %20, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %24 = icmp sgt i64 %22, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %21, %preheader__1 ], [ %33, %exiting__2 ] + %25 = icmp sle i64 %__qsVar0__idxQubit__, %23 + %26 = icmp sge i64 %__qsVar0__idxQubit__, %23 + %27 = select i1 %24, i1 %25, i1 %26 + br i1 %27, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %28 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %28) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + 
%30 = bitcast i8* %29 to { i1, %Qubit* }** + %31 = load { i1, %Qubit* }*, { i1, %Qubit* }** %30, align 8 + %32 = bitcast { i1, %Qubit* }* %31 to %Tuple* + call void @__quantum__rt__callable_invoke(%Callable* %28, %Tuple* %32, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %28, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %33 = add i64 %__qsVar0__idxQubit__, %22 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + %34 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %41, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %35) + %38 = bitcast i8* %37 to { i1, %Qubit* }** + %39 = load { i1, %Qubit* }*, { i1, %Qubit* }** %38, align 8 + %40 = bitcast { i1, %Qubit* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %41 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___19e349a447284c0584da8df665449ea7_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %register) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %5) + %8 = bitcast i8* %7 to { i1, %Qubit* }** + %9 = load { i1, %Qubit* }*, { i1, %Qubit* }** %8, align 8 + %10 = bitcast { i1, %Qubit* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %12 = call %Range @Microsoft__Quantum__Arrays___f1cf675f218b4a7fae00ea122aa1825f_IndexRange__body(%Array* %register) + %13 = extractvalue %Range %12, 0 + %14 = extractvalue %Range %12, 1 + %15 = extractvalue %Range %12, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %16 = icmp sgt i64 %14, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idxQubit = 
phi i64 [ %13, %preheader__1 ], [ %29, %exiting__2 ] + %17 = icmp sle i64 %idxQubit, %15 + %18 = icmp sge i64 %idxQubit, %15 + %19 = select i1 %16, i1 %17, i1 %18 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %20, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %20) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %22 = bitcast i8* %21 to { i1, %Qubit* }** + %23 = load { i1, %Qubit* }*, { i1, %Qubit* }** %22, align 8 + %24 = bitcast { i1, %Qubit* }* %23 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 1) + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i1, %Qubit* }* }* getelementptr ({ %Array*, { i1, %Qubit* }* }, { %Array*, { i1, %Qubit* }* }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { %Array*, { i1, %Qubit* }* }* + %27 = getelementptr inbounds { %Array*, { i1, %Qubit* }* }, { %Array*, { i1, %Qubit* }* }* %26, i32 0, i32 0 + %28 = getelementptr inbounds { %Array*, { i1, %Qubit* }* }, { %Array*, { i1, %Qubit* }* }* %26, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %27, align 8 + store { i1, %Qubit* }* %23, { i1, %Qubit* }** %28, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %25, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %20, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %20, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %idxQubit, %14 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + %30 = sub i64 %3, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %31 = phi i64 [ 0, %exit__2 ], [ %37, %exiting__3 ] + %32 = icmp sle i64 %31, %30 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %31) + %34 = bitcast i8* %33 to { i1, %Qubit* }** + %35 = load { i1, %Qubit* }*, { i1, %Qubit* }** %34, align 8 + %36 = bitcast { i1, %Qubit* }* %35 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %36, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %37 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___19e349a447284c0584da8df665449ea7_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { 
%Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %register) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %5) + %8 = bitcast i8* %7 to { i1, %Qubit* }** + %9 = load { i1, %Qubit* }*, { i1, %Qubit* }** %8, align 8 + %10 = bitcast { i1, %Qubit* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %12 = call %Range @Microsoft__Quantum__Arrays___f1cf675f218b4a7fae00ea122aa1825f_IndexRange__body(%Array* %register) + %13 = extractvalue %Range %12, 0 + %14 = extractvalue %Range %12, 1 + %15 = extractvalue %Range %12, 2 + %16 = sub i64 %15, %13 + %17 = sdiv i64 %16, %14 + %18 = mul i64 %14, %17 + %19 = add i64 %13, %18 + %20 = sub i64 0, %14 + %21 = insertvalue %Range zeroinitializer, i64 %19, 0 + %22 = insertvalue %Range %21, i64 %20, 1 + %23 = insertvalue %Range %22, i64 %13, 2 + %24 = extractvalue %Range %23, 0 + %25 = extractvalue %Range %23, 1 + %26 = extractvalue %Range %23, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %27 = icmp sgt i64 %25, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %24, %preheader__1 ], [ %40, %exiting__2 ] + %28 = icmp sle i64 %__qsVar0__idxQubit__, %26 + %29 = icmp sge i64 %__qsVar0__idxQubit__, %26 + %30 = select i1 %27, i1 %28, i1 %29 + br i1 %30, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %31 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %31) + call void @__quantum__rt__callable_make_controlled(%Callable* %31) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %33 = bitcast i8* %32 to { i1, %Qubit* }** + %34 = load { i1, %Qubit* }*, { i1, %Qubit* }** %33, align 8 + %35 = bitcast { i1, %Qubit* }* %34 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i1, %Qubit* }* }* getelementptr ({ %Array*, { i1, %Qubit* }* }, { %Array*, { i1, %Qubit* }* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { %Array*, { i1, %Qubit* }* }* + %38 = getelementptr inbounds { %Array*, { i1, %Qubit* }* }, { %Array*, { i1, %Qubit* }* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { %Array*, { i1, %Qubit* }* }, { %Array*, { i1, %Qubit* }* }* %37, i32 0, i32 1 + 
store %Array* %__controlQubits__, %Array** %38, align 8 + store { i1, %Qubit* }* %34, { i1, %Qubit* }** %39, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %31, %Tuple* %36, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %40 = add i64 %__qsVar0__idxQubit__, %25 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + %41 = sub i64 %3, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %42 = phi i64 [ 0, %exit__2 ], [ %48, %exiting__3 ] + %43 = icmp sle i64 %42, %41 + br i1 %43, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %42) + %45 = bitcast i8* %44 to { i1, %Qubit* }** + %46 = load { i1, %Qubit* }*, { i1, %Qubit* }** %45, align 8 + %47 = bitcast { i1, %Qubit* }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %42, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal %Range @Microsoft__Quantum__Arrays___fca6ec94ae3342ea8545ea7003b87bf1_IndexRange__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %2 +} + +define internal %Range @Microsoft__Quantum__Arrays___ea8dc357841940139fee623fefb8c332_IndexRange__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %2 +} + +define internal %Range @Microsoft__Quantum__Arrays___f32399b15f5e44b594f235c5225a7400_IndexRange__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { i64, i1 }** + %6 = load { i64, i1 }*, { i64, i1 }** %5, align 8 + %7 = bitcast { i64, i1 }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add 
i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %9 = sub i64 %0, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %11 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %12) + %15 = bitcast i8* %14 to { i64, i1 }** + %16 = load { i64, i1 }*, { i64, i1 }** %15, align 8 + %17 = bitcast { i64, i1 }* %16 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %18 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %10 +} + +define internal %Range @Microsoft__Quantum__Arrays___e16320bd27aa426885e6375e173405df_IndexRange__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %2 +} + +define internal %Range @Microsoft__Quantum__Arrays___51e146e9e3f741e2a6043055f76ad080_IndexRange__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %2 +} + +define internal void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %actual, i1 %expected, %String* %message) { +entry: + %0 = icmp ne i1 %actual, %expected + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Diagnostics___b01e27bf91bd4e748af9bc2d289c8960___QsRef1__FormattedFailure____body(i1 %actual, i1 %expected, %String* %message) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___88aec1558aed4c1cb84901b497b7f322_Subarray__body(%Array* %indices, %Array* %array) { +entry: + %sliced = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { i64, i1 }** + %6 = load { i64, i1 }*, { i64, i1 }** %5, align 8 + %7 = bitcast { i64, i1 }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %nSliced = call i64 @__quantum__rt__array_get_size_1d(%Array* %indices) + %9 = icmp eq i64 
%nSliced, 0 + br i1 %9, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %10 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + %11 = sub i64 %0, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 0) + %13 = bitcast i8* %12 to i64* + %14 = load i64, i64* %13, align 4 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %14) + %16 = bitcast i8* %15 to { i64, i1 }** + %17 = load { i64, i1 }*, { i64, i1 }** %16, align 8 + %18 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nSliced) + %19 = sub i64 %nSliced, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %20 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__2 ] + %21 = icmp sle i64 %20, %11 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %20) + %23 = bitcast i8* %22 to { i64, i1 }** + %24 = load { i64, i1 }*, { i64, i1 }** %23, align 8 + %25 = bitcast { i64, i1 }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %10 + +header__3: ; preds = %exiting__3, %continue__1 + %27 = phi i64 [ 0, %continue__1 ], [ %32, %exiting__3 ] + %28 = icmp sle i64 %27, %19 + br i1 %28, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %27) + %30 = bitcast i8* %29 to { i64, i1 }** + store { i64, i1 }* %17, { i64, i1 }** %30, align 8 + %31 = bitcast { i64, i1 }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %31, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %32 = add i64 %27, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %18, %Array** %sliced, align 8 + %33 = sub i64 %nSliced, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %34 = phi i64 [ 0, %exit__3 ], [ %40, %exiting__4 ] + %35 = icmp sle i64 %34, %33 + br i1 %35, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %34) + %37 = bitcast i8* %36 to { i64, i1 }** + %38 = load { i64, i1 }*, { i64, i1 }** %37, align 8 + %39 = bitcast { i64, i1 }* %38 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %39, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %40 = add i64 %34, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + %41 = sub i64 %nSliced, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idx = phi i64 [ 1, %exit__4 ], [ %56, %exiting__5 ] + %42 = icmp sle i64 %idx, %41 + br i1 %42, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %43 = load %Array*, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 -1) + %44 = call %Array* @__quantum__rt__array_copy(%Array* %43, i1 false) + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %indices, i64 %idx) + %46 = bitcast i8* %45 to i64* + %47 = load i64, i64* %46, 
align 4 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %47) + %49 = bitcast i8* %48 to { i64, i1 }** + %50 = load { i64, i1 }*, { i64, i1 }** %49, align 8 + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 %idx) + %52 = bitcast i8* %51 to { i64, i1 }** + %53 = bitcast { i64, i1 }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %53, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %53, i32 1) + %54 = load { i64, i1 }*, { i64, i1 }** %52, align 8 + %55 = bitcast { i64, i1 }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %55, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %55, i32 -1) + store { i64, i1 }* %50, { i64, i1 }** %52, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 1) + store %Array* %44, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %43, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %56 = add i64 %idx, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %57 = load %Array*, %Array** %sliced, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %indices, i32 -1) + %58 = sub i64 %0, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %59 = phi i64 [ 0, %exit__5 ], [ %65, %exiting__6 ] + %60 = icmp sle i64 %59, %58 + br i1 %60, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %59) + %62 = bitcast i8* %61 to { i64, i1 }** + %63 = load { i64, i1 }*, { i64, i1 }** %62, align 8 + %64 = bitcast { i64, i1 }* %63 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %64, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %65 = add i64 %59, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + %66 = call i64 @__quantum__rt__array_get_size_1d(%Array* %57) + %67 = sub i64 %66, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %68 = phi i64 [ 0, %exit__6 ], [ %74, %exiting__7 ] + %69 = icmp sle i64 %68, %67 + br i1 %69, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %70 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %57, i64 %68) + %71 = bitcast i8* %70 to { i64, i1 }** + %72 = load { i64, i1 }*, { i64, i1 }** %71, align 8 + %73 = bitcast { i64, i1 }* %72 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %73, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %74 = add i64 %68, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %57, i32 -1) + ret %Array* %57 +} + +define internal %Array* @Microsoft__Quantum__Arrays___591aa3c3d09b40fd80ccf1fad0bc50fd_Padded__body(i64 %nElementsTotal, { double, double }* %defaultElement, %Array* %inputArray) { +entry: + %0 = bitcast { double, double }* %defaultElement to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %nElementsInitial = call i64 @__quantum__rt__array_get_size_1d(%Array* %inputArray) + %1 = sub i64 %nElementsInitial, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %inputArray, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 1) + %nAbsElementsTotal = call i64 @Microsoft__Quantum__Math__AbsI__body(i64 %nElementsTotal) + %9 = icmp sge i64 %nAbsElementsTotal, %nElementsInitial + %10 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([71 x i8], [71 x i8]* @19, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactB__body(i1 %9, i1 true, %String* %10) + %nElementsPad = sub i64 %nAbsElementsTotal, %nElementsInitial + %padArray = call %Array* @Microsoft__Quantum__Arrays___f1a022a821b14ff2afd521e39f1a1bcc_ConstantArray__body(i64 %nElementsPad, { double, double }* %defaultElement) + %11 = call i64 @__quantum__rt__array_get_size_1d(%Array* %padArray) + %12 = sub i64 %11, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %13 = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %padArray, i64 %13) + %16 = bitcast i8* %15 to { double, double }** + %17 = load { double, double }*, { double, double }** %16, align 8 + %18 = bitcast { double, double }* %17 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %13, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 1) + %20 = icmp sge i64 %nElementsTotal, 0 + br i1 %20, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %exit__2 + %21 = call %Array* @__quantum__rt__array_concatenate(%Array* %padArray, %Array* %inputArray) + %22 = call i64 @__quantum__rt__array_get_size_1d(%Array* %21) + %23 = sub i64 %22, 1 + br label %header__3 + +condFalse__1: ; preds = %exit__2 + %24 = call %Array* @__quantum__rt__array_concatenate(%Array* %inputArray, %Array* %padArray) + %25 = call i64 @__quantum__rt__array_get_size_1d(%Array* %24) + %26 = sub i64 %25, 1 + br label %header__4 + +condContinue__1: ; preds = %exit__4, %exit__3 + %27 = phi %Array* [ %21, %exit__3 ], [ %24, %exit__4 ] + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + %28 = sub i64 %nElementsInitial, 1 + br label %header__5 + +header__3: ; preds = %exiting__3, %condTrue__1 + %29 = phi i64 [ 0, %condTrue__1 ], [ %35, %exiting__3 ] + %30 = icmp sle i64 %29, %23 + br i1 %30, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %29) + %32 = bitcast i8* %31 to { double, double }** + %33 = load { double, double }*, { double, double }** %32, align 8 + %34 = bitcast { double, double }* %33 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %35 = add i64 %29, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %21, i32 1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %21, i32 -1) + br label %condContinue__1 + +header__4: ; preds = %exiting__4, %condFalse__1 + %36 = phi i64 [ 0, %condFalse__1 ], [ %42, %exiting__4 ] + %37 = icmp sle i64 %36, %26 + br i1 %37, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %24, i64 %36) + %39 = bitcast i8* %38 to { double, double }** + %40 = load { double, double }*, { double, double }** %39, align 8 + %41 = bitcast { double, double }* %40 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %42 = add i64 %36, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %24, i32 -1) + br label %condContinue__1 + +header__5: ; preds = %exiting__5, %condContinue__1 + %43 = phi i64 [ 0, %condContinue__1 ], [ %49, %exiting__5 ] + %44 = icmp sle i64 %43, %28 + br i1 %44, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %inputArray, i64 %43) + %46 = bitcast i8* %45 to { double, double }** + %47 = load { double, double }*, { double, double }** %46, align 8 + %48 = bitcast { double, double }* %47 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %48, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %49 = add i64 %43, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %inputArray, i32 -1) + %50 = sub i64 %11, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %51 = phi i64 [ 0, %exit__5 ], [ %57, %exiting__6 ] + %52 = icmp sle i64 %51, %50 + br i1 %52, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %53 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %padArray, i64 %51) + %54 = bitcast i8* %53 to { double, double }** + %55 = load { double, double }*, { double, double }** %54, align 8 + %56 = bitcast { double, double }* %55 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %57 = add i64 %51, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %padArray, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + %58 = sub i64 %11, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %59 = phi i64 [ 0, %exit__6 ], [ %65, %exiting__7 ] + %60 = icmp sle i64 %59, %58 + br i1 %60, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %padArray, i64 %59) + %62 = bitcast i8* %61 to { double, double }** + %63 = load { double, double }*, { double, double }** %62, align 8 + %64 = bitcast { double, double }* %63 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %64, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %65 = add i64 %59, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %padArray, i32 -1) + ret %Array* %27 +} + +define internal i64 @Microsoft__Quantum__Math__AbsI__body(i64 %a) { +entry: + %0 = icmp slt i64 %a, 0 + br i1 %0, label 
%condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %1 = sub i64 0, %a + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %2 = phi i64 [ %1, %condTrue__1 ], [ %a, %condFalse__1 ] + ret i64 %2 +} + +define internal %Array* @Microsoft__Quantum__Arrays___f1a022a821b14ff2afd521e39f1a1bcc_ConstantArray__body(i64 %length, { double, double }* %value) { +entry: + %0 = bitcast { double, double }* %value to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %2 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %3) + %6 = bitcast i8* %5 to { double, double }** + store { double, double }* %value, { double, double }** %6, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret %Array* %1 +} + +define internal %Array* @Microsoft__Quantum__Arrays___d7a5dace8d00477d9dcdf9020a467709_ConstantArray__body(i64 %length, double %value) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %1 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2) + %5 = bitcast i8* %4 to double* + store double %value, double* %5, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + ret %Array* %0 +} + +define internal { i64, i1 }* @Microsoft__Quantum__Arrays___81b2e45870f04b54ac181661cda83d5d___QsRef1__Identity____body(i64 %0, i1 %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i1 }* getelementptr ({ i64, i1 }, { i64, i1 }* null, i32 1) to i64)) + %input = bitcast %Tuple* %2 to { i64, i1 }* + %3 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %input, i32 0, i32 0 + %4 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %input, i32 0, i32 1 + store i64 %0, i64* %3, align 4 + store i1 %1, i1* %4, align 1 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret { i64, i1 }* %input +} + +define internal { i64, { double, double }* }* @Microsoft__Quantum__Arrays___b8c470817e3c4d54a387b72f70fe0572___QsRef1__Identity____body(i64 %0, { double, double }* %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { double, double }* }* getelementptr ({ i64, { double, double }* }, { i64, { double, double }* }* null, i32 1) to i64)) + %input = bitcast %Tuple* %2 to { i64, { double, double }* }* + %3 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %input, i32 0, i32 0 + %4 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %input, i32 0, 
i32 1 + store i64 %0, i64* %3, align 4 + store { double, double }* %1, { double, double }** %4, align 8 + %5 = bitcast { double, double }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret { i64, { double, double }* }* %input +} + +define internal { i64, double }* @Microsoft__Quantum__Arrays___6ed5375d64984881b234f01e25bc55b9___QsRef1__Identity____body(i64 %0, double %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double }* getelementptr ({ i64, double }, { i64, double }* null, i32 1) to i64)) + %input = bitcast %Tuple* %2 to { i64, double }* + %3 = getelementptr inbounds { i64, double }, { i64, double }* %input, i32 0, i32 0 + %4 = getelementptr inbounds { i64, double }, { i64, double }* %input, i32 0, i32 1 + store i64 %0, i64* %3, align 4 + store double %1, double* %4, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret { i64, double }* %input +} + +define internal { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* @Microsoft__Quantum__Arrays___d0d4b543e4084f10a022319d0e6d7887___QsRef1__Identity____body(i64 %0, { { %Array*, i64 }*, { i64, %Callable* }* }* %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* getelementptr ({ i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* null, i32 1) to i64)) + %input = bitcast %Tuple* %2 to { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* + %3 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %input, i32 0, i32 0 + %4 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %input, i32 0, i32 1 + store i64 %0, i64* %3, align 4 + store { { %Array*, i64 }*, { i64, %Callable* }* }* %1, { { %Array*, i64 }*, { i64, %Callable* }* }** %4, align 8 + %5 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %1, i32 0, i32 0 + %6 = load { %Array*, i64 }*, { %Array*, i64 }** %5, align 8 + %7 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %6, i32 0, i32 0 + %8 = load %Array*, %Array** %7, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 1) + %9 = bitcast { %Array*, i64 }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %1, i32 0, i32 1 + %11 = load { i64, %Callable* }*, { i64, %Callable* }** %10, align 8 + %12 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %11, i32 0, i32 1 + %13 = load %Callable*, %Callable** %12, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 1) + %14 = bitcast { i64, %Callable* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 1) + %15 = 
bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %13, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %8, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %13, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %13, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %input +} + +define internal { i64, %Array* }* @Microsoft__Quantum__Arrays___9e4eb8c66a5d41c0ab661fccd1f15c41___QsRef1__Identity____body(i64 %0, %Array* %1) { +entry: + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %input = bitcast %Tuple* %2 to { i64, %Array* }* + %3 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %input, i32 0, i32 0 + %4 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %input, i32 0, i32 1 + store i64 %0, i64* %3, align 4 + store %Array* %1, %Array** %4, align 8 + %5 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %6 = sub i64 %5, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %7 = phi i64 [ 0, %entry ], [ %23, %exiting__1 ] + %8 = icmp sle i64 %7, %6 + br i1 %8, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %7) + %10 = bitcast i8* %9 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %11 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %10, align 8 + %12 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %11, i32 0, i32 0 + %13 = load { %Array*, i64 }*, { %Array*, i64 }** %12, align 8 + %14 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %13, i32 0, i32 0 + %15 = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %16 = bitcast { %Array*, i64 }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + %17 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %11, i32 0, i32 1 + %18 = load { i64, %Callable* }*, { i64, %Callable* }** %17, align 8 + %19 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %18, i32 0, i32 1 + %20 = load %Callable*, %Callable** %19, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %20, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %20, i32 1) + %21 = bitcast { i64, %Callable* }* %18 to %Tuple* + call 
void @__quantum__rt__tuple_update_alias_count(%Tuple* %21, i32 1) + %22 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %11 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %23 = add i64 %7, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %24 = sub i64 %5, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %25 = phi i64 [ 0, %exit__1 ], [ %41, %exiting__2 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %25) + %28 = bitcast i8* %27 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %29 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %28, align 8 + %30 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %29, i32 0, i32 0 + %31 = load { %Array*, i64 }*, { %Array*, i64 }** %30, align 8 + %32 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %31, i32 0, i32 0 + %33 = load %Array*, %Array** %32, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %33, i32 1) + %34 = bitcast { %Array*, i64 }* %31 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 1) + %35 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %29, i32 0, i32 1 + %36 = load { i64, %Callable* }*, { i64, %Callable* }** %35, align 8 + %37 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %36, i32 0, i32 1 + %38 = load %Callable*, %Callable** %37, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %38, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %38, i32 1) + %39 = bitcast { i64, %Callable* }* %36 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %39, i32 1) + %40 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %41 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 1) + %42 = sub i64 %5, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %43 = phi i64 [ 0, %exit__2 ], [ %59, %exiting__3 ] + %44 = icmp sle i64 %43, %42 + br i1 %44, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %45 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %43) + %46 = bitcast i8* %45 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %47 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %46, align 8 + %48 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %47, i32 0, i32 0 + %49 = load { %Array*, i64 }*, { %Array*, i64 }** %48, align 8 + %50 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %49, i32 0, i32 0 + %51 = load %Array*, %Array** %50, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %51, i32 -1) + %52 = bitcast { %Array*, i64 }* %49 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %52, i32 -1) + %53 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %47, i32 0, i32 1 + %54 = load { i64, %Callable* }*, { i64, %Callable* }** %53, align 8 + %55 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %54, i32 0, i32 1 + %56 = load %Callable*, %Callable** %55, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %56, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %56, i32 -1) + %57 = bitcast { i64, %Callable* }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + %58 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %47 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %58, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %59 = add i64 %43, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + ret { i64, %Array* }* %input +} + +define internal %Array* @Microsoft__Quantum__Arrays___4aa279afe82a49f18051c32a38c71fb7_Filtered__body(%Callable* %predicate, %Array* %array) { +entry: + %idxArray = alloca %Array*, align 8 + %totalFound = alloca i64, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %predicate, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %predicate, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { i64, i1 }** + %6 = load { i64, i1 }*, { i64, i1 }** %5, align 8 + %7 = bitcast { i64, i1 }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + store i64 0, i64* %totalFound, align 4 + %9 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %0) + %10 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %11 = phi i64 [ 0, %exit__1 ], [ %15, %exiting__2 ] + %12 = icmp sle i64 %11, %10 + br i1 %12, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %9, i64 %11) + %14 = bitcast i8* %13 to i64* + store i64 0, i64* %14, align 4 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %15 = add i64 %11, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + store %Array* %9, %Array** %idxArray, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 1) + %16 = call %Range @Microsoft__Quantum__Arrays___f32399b15f5e44b594f235c5225a7400_IndexRange__body(%Array* %array) + %17 = extractvalue %Range %16, 0 + %18 = extractvalue %Range %16, 1 + %19 = extractvalue %Range %16, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__2 + %20 = icmp sgt i64 %18, 0 + br label %header__3 + +header__3: ; preds = %exiting__3, %preheader__1 + %idxElement = phi i64 [ %17, %preheader__1 ], [ 
%38, %exiting__3 ] + %21 = icmp sle i64 %idxElement, %19 + %22 = icmp sge i64 %idxElement, %19 + %23 = select i1 %20, i1 %21, i1 %22 + br i1 %23, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idxElement) + %25 = bitcast i8* %24 to { i64, i1 }** + %26 = load { i64, i1 }*, { i64, i1 }** %25, align 8 + %27 = bitcast { i64, i1 }* %26 to %Tuple* + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1 }* getelementptr ({ i1 }, { i1 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %predicate, %Tuple* %27, %Tuple* %28) + %29 = bitcast %Tuple* %28 to { i1 }* + %30 = getelementptr inbounds { i1 }, { i1 }* %29, i32 0, i32 0 + %31 = load i1, i1* %30, align 1 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + br i1 %31, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__3 + %32 = load %Array*, %Array** %idxArray, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = load i64, i64* %totalFound, align 4 + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %34) + %36 = bitcast i8* %35 to i64* + store i64 %idxElement, i64* %36, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %idxArray, align 8 + %37 = add i64 %34, 1 + store i64 %37, i64* %totalFound, align 4 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__3 + br label %exiting__3 + +exiting__3: ; preds = %continue__1 + %38 = add i64 %idxElement, %18 + br label %header__3 + +exit__3: ; preds = %header__3 + %39 = load %Array*, %Array** %idxArray, align 8 + %40 = load i64, i64* %totalFound, align 4 + %41 = sub i64 %40, 1 + %42 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %41, 2 + %43 = call %Array* @__quantum__rt__array_slice_1d(%Array* %39, %Range %42, i1 true) + %44 = call %Array* @Microsoft__Quantum__Arrays___88aec1558aed4c1cb84901b497b7f322_Subarray__body(%Array* %43, %Array* %array) + call void @__quantum__rt__capture_update_alias_count(%Callable* %predicate, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %predicate, i32 -1) + %45 = sub i64 %0, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %46 = phi i64 [ 0, %exit__3 ], [ %52, %exiting__4 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %46) + %49 = bitcast i8* %48 to { i64, i1 }** + %50 = load { i64, i1 }*, { i64, i1 }** %49, align 8 + %51 = bitcast { i64, i1 }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %51, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %52 = add i64 %46, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %39, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %43, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %39, i32 -1) + ret %Array* %44 +} + +define internal %Array* @Microsoft__Quantum__Arrays___0642fb9fda514da39c521340e3041e14_MappedByIndex__body(%Callable* %mapper, %Array* 
%array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = icmp eq i64 %length, 0 + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %1 + +continue__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %3 = bitcast i8* %2 to i1* + %4 = load i1, i1* %3, align 1 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i1 }* getelementptr ({ i64, i1 }, { i64, i1 }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, i1 }* + %7 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %6, i32 0, i32 1 + store i64 0, i64* %7, align 4 + store i1 %4, i1* %8, align 1 + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i1 }* getelementptr ({ i64, i1 }, { i64, i1 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %5, %Tuple* %9) + %first = bitcast %Tuple* %9 to { i64, i1 }* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %11 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %12 = phi i64 [ 0, %continue__1 ], [ %16, %exiting__1 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %12) + %15 = bitcast i8* %14 to { i64, i1 }** + store { i64, i1 }* %first, { i64, i1 }** %15, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %16 = add i64 %12, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %10, %Array** %retval, align 8 + %17 = sub i64 %length, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %18 = phi i64 [ 0, %exit__1 ], [ %24, %exiting__2 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %18) + %21 = bitcast i8* %20 to { i64, i1 }** + %22 = load { i64, i1 }*, { i64, i1 }** %21, align 8 + %23 = bitcast { i64, i1 }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %23, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %24 = add i64 %18, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %25 = sub i64 %length, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idx = phi i64 [ 1, %exit__2 ], [ %42, %exiting__3 ] + %26 = icmp sle i64 %idx, %25 + br i1 %26, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %27 = load %Array*, %Array** %retval, align 8 + call 
void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1) + %28 = call %Array* @__quantum__rt__array_copy(%Array* %27, i1 false) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %30 = bitcast i8* %29 to i1* + %31 = load i1, i1* %30, align 1 + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i1 }* getelementptr ({ i64, i1 }, { i64, i1 }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { i64, i1 }* + %34 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %33, i32 0, i32 1 + store i64 %idx, i64* %34, align 4 + store i1 %31, i1* %35, align 1 + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, i1 }* getelementptr ({ i64, i1 }, { i64, i1 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %32, %Tuple* %36) + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 %idx) + %38 = bitcast i8* %37 to { i64, i1 }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %36, i32 1) + %39 = bitcast %Tuple* %36 to { i64, i1 }* + %40 = load { i64, i1 }*, { i64, i1 }** %38, align 8 + %41 = bitcast { i64, i1 }* %40 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %41, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 -1) + store { i64, i1 }* %39, { i64, i1 }** %38, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + store %Array* %28, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %42 = add i64 %idx, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %43 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + %44 = call i64 @__quantum__rt__array_get_size_1d(%Array* %43) + %45 = sub i64 %44, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %46 = phi i64 [ 0, %exit__3 ], [ %52, %exiting__4 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 %46) + %49 = bitcast i8* %48 to { i64, i1 }** + %50 = load { i64, i1 }*, { i64, i1 }** %49, align 8 + %51 = bitcast { i64, i1 }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %51, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %52 = add i64 %46, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret %Array* %43 +} + +define internal %Array* @Microsoft__Quantum__Arrays___18b22a2872974a209223a31f4af592ba_MappedByIndex__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void 
@__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to { double, double }** + %5 = load { double, double }*, { double, double }** %4, align 8 + %6 = bitcast { double, double }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %8 = icmp eq i64 %length, 0 + br i1 %8, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %9 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %10 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %12 = bitcast i8* %11 to { double, double }** + %13 = load { double, double }*, { double, double }** %12, align 8 + %14 = bitcast { double, double }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 1) + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { double, double }* }* getelementptr ({ i64, { double, double }* }, { i64, { double, double }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { i64, { double, double }* }* + %17 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %16, i32 0, i32 1 + store i64 0, i64* %17, align 4 + store { double, double }* %13, { double, double }** %18, align 8 + %19 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { double, double }* }* getelementptr ({ i64, { double, double }* }, { i64, { double, double }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %15, %Tuple* %19) + %first = bitcast %Tuple* %19 to { i64, { double, double }* }* + %20 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %first, i32 0, i32 1 + %21 = load { double, double }*, { double, double }** %20, align 8 + %22 = bitcast { double, double }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %22, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %19, i32 1) + %23 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %24 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %25 = phi i64 [ 0, %then0__1 ], [ %31, %exiting__2 ] + %26 = icmp sle i64 %25, %10 + br i1 %26, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %25) + %28 = bitcast i8* %27 to { double, double }** + %29 = load { double, double }*, { double, double }** %28, align 8 + %30 = bitcast { double, double }* %29 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %30, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %31 = add i64 %25, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %9 + +header__3: ; preds = %exiting__3, %continue__1 + %32 = phi i64 [ 0, %continue__1 ], [ %38, %exiting__3 ] + %33 = icmp sle i64 %32, %24 + br i1 %33, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %32) + %35 = bitcast i8* %34 to { i64, { double, double }* }** + store { i64, { double, double }* }* %first, { i64, { double, double }* }** %35, align 8 + %36 = load { double, double }*, { double, double }** %20, align 8 + %37 = bitcast { double, double }* %36 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %38 = add i64 %32, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %23, %Array** %retval, align 8 + %39 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %40 = phi i64 [ 0, %exit__3 ], [ %49, %exiting__4 ] + %41 = icmp sle i64 %40, %39 + br i1 %41, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %42 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %40) + %43 = bitcast i8* %42 to { i64, { double, double }* }** + %44 = load { i64, { double, double }* }*, { i64, { double, double }* }** %43, align 8 + %45 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %44, i32 0, i32 1 + %46 = load { double, double }*, { double, double }** %45, align 8 + %47 = bitcast { double, double }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 1) + %48 = bitcast { i64, { double, double }* }* %44 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %48, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %49 = add i64 %40, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + %50 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idx = phi i64 [ 1, %exit__4 ], [ %74, %exiting__5 ] + %51 = icmp sle i64 %idx, %50 + br i1 %51, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %52 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %52, i32 -1) + %53 = call %Array* @__quantum__rt__array_copy(%Array* %52, i1 false) + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %55 = bitcast i8* %54 to { double, double }** + %56 = load { double, double }*, { double, double }** %55, align 8 + %57 = bitcast { double, double }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %57, i32 1) + %58 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { double, double }* }* getelementptr ({ i64, { double, double }* }, { i64, { double, double }* }* null, i32 1) to i64)) + %59 = bitcast %Tuple* %58 to { i64, { double, double }* }* + %60 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %59, i32 0, i32 0 + %61 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %59, i32 
0, i32 1 + store i64 %idx, i64* %60, align 4 + store { double, double }* %56, { double, double }** %61, align 8 + %62 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { double, double }* }* getelementptr ({ i64, { double, double }* }, { i64, { double, double }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %58, %Tuple* %62) + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 %idx) + %64 = bitcast i8* %63 to { i64, { double, double }* }** + %65 = bitcast %Tuple* %62 to { i64, { double, double }* }* + %66 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %65, i32 0, i32 1 + %67 = load { double, double }*, { double, double }** %66, align 8 + %68 = bitcast { double, double }* %67 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %68, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %62, i32 1) + %69 = load { i64, { double, double }* }*, { i64, { double, double }* }** %64, align 8 + %70 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %69, i32 0, i32 1 + %71 = load { double, double }*, { double, double }** %70, align 8 + %72 = bitcast { double, double }* %71 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %72, i32 -1) + %73 = bitcast { i64, { double, double }* }* %69 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %73, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %72, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %73, i32 -1) + store { i64, { double, double }* }* %65, { i64, { double, double }* }** %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %53, i32 1) + store %Array* %53, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %52, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %57, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %58, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %74 = add i64 %idx, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %75 = load %Array*, %Array** %retval, align 8 + %76 = load { double, double }*, { double, double }** %20, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %77 = sub i64 %length, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %78 = phi i64 [ 0, %exit__5 ], [ %84, %exiting__6 ] + %79 = icmp sle i64 %78, %77 + br i1 %79, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %80 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %78) + %81 = bitcast i8* %80 to { double, double }** + %82 = load { double, double }*, { double, double }** %81, align 8 + %83 = bitcast { double, double }* %82 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %83, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %84 = add i64 %78, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + %85 = bitcast { double, double }* %76 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %85, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %19, i32 -1) + %86 = call i64 
@__quantum__rt__array_get_size_1d(%Array* %75) + %87 = sub i64 %86, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %88 = phi i64 [ 0, %exit__6 ], [ %97, %exiting__7 ] + %89 = icmp sle i64 %88, %87 + br i1 %89, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %90 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %75, i64 %88) + %91 = bitcast i8* %90 to { i64, { double, double }* }** + %92 = load { i64, { double, double }* }*, { i64, { double, double }* }** %91, align 8 + %93 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %92, i32 0, i32 1 + %94 = load { double, double }*, { double, double }** %93, align 8 + %95 = bitcast { double, double }* %94 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %95, i32 -1) + %96 = bitcast { i64, { double, double }* }* %92 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %96, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %97 = add i64 %88, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %75, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %85, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + ret %Array* %75 +} + +define internal %Array* @Microsoft__Quantum__Arrays___27b80d3c0afd4dd68f55d127c5cdfce5_MappedByIndex__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = icmp eq i64 %length, 0 + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %1 + +continue__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %3 = bitcast i8* %2 to double* + %4 = load double, double* %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double }* getelementptr ({ i64, double }, { i64, double }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i64, double }* + %7 = getelementptr inbounds { i64, double }, { i64, double }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, double }, { i64, double }* %6, i32 0, i32 1 + store i64 0, i64* %7, align 4 + store double %4, double* %8, align 8 + %9 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double }* getelementptr ({ i64, double }, { i64, double }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %5, %Tuple* %9) + %first = bitcast %Tuple* %9 to { i64, double }* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 1) + %10 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %11 = sub i64 %length, 1 + br label %header__1 + 
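+; The loop below (header__1/body__1) prefills the freshly created result array:
+; each slot is pointed at %first, the tuple produced by invoking the mapper on
+; (0, array[0]), and that tuple's reference count is incremented once per slot
+; stored; later iterations in header__3 overwrite slots 1..length-1 in place.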
+header__1: ; preds = %exiting__1, %continue__1 + %12 = phi i64 [ 0, %continue__1 ], [ %16, %exiting__1 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %12) + %15 = bitcast i8* %14 to { i64, double }** + store { i64, double }* %first, { i64, double }** %15, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %16 = add i64 %12, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %10, %Array** %retval, align 8 + %17 = sub i64 %length, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %18 = phi i64 [ 0, %exit__1 ], [ %24, %exiting__2 ] + %19 = icmp sle i64 %18, %17 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %18) + %21 = bitcast i8* %20 to { i64, double }** + %22 = load { i64, double }*, { i64, double }** %21, align 8 + %23 = bitcast { i64, double }* %22 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %23, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %24 = add i64 %18, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %25 = sub i64 %length, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idx = phi i64 [ 1, %exit__2 ], [ %42, %exiting__3 ] + %26 = icmp sle i64 %idx, %25 + br i1 %26, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %27 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 -1) + %28 = call %Array* @__quantum__rt__array_copy(%Array* %27, i1 false) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %30 = bitcast i8* %29 to double* + %31 = load double, double* %30, align 8 + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double }* getelementptr ({ i64, double }, { i64, double }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { i64, double }* + %34 = getelementptr inbounds { i64, double }, { i64, double }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { i64, double }, { i64, double }* %33, i32 0, i32 1 + store i64 %idx, i64* %34, align 4 + store double %31, double* %35, align 8 + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double }* getelementptr ({ i64, double }, { i64, double }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %32, %Tuple* %36) + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %28, i64 %idx) + %38 = bitcast i8* %37 to { i64, double }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %36, i32 1) + %39 = bitcast %Tuple* %36 to { i64, double }* + %40 = load { i64, double }*, { i64, double }** %38, align 8 + %41 = bitcast { i64, double }* %40 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %41, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %41, i32 -1) + store { i64, double }* %39, { i64, double }** %38, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %28, i32 1) + store %Array* %28, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %42 = add i64 %idx, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %43 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %9, i32 -1) + %44 = call i64 @__quantum__rt__array_get_size_1d(%Array* %43) + %45 = sub i64 %44, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %46 = phi i64 [ 0, %exit__3 ], [ %52, %exiting__4 ] + %47 = icmp sle i64 %46, %45 + br i1 %47, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %48 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %43, i64 %46) + %49 = bitcast i8* %48 to { i64, double }** + %50 = load { i64, double }*, { i64, double }** %49, align 8 + %51 = bitcast { i64, double }* %50 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %51, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %52 = add i64 %46, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %43, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + ret %Array* %43 +} + +define internal %Array* @Microsoft__Quantum__Arrays___4e18ab692bdc46809cf35e50e230ef2a_MappedByIndex__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %5 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %4, align 8 + %6 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %5, i32 0, i32 0 + %7 = load { %Array*, i64 }*, { %Array*, i64 }** %6, align 8 + %8 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %7, i32 0, i32 0 + %9 = load %Array*, %Array** %8, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 1) + %10 = bitcast { %Array*, i64 }* %7 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + %11 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %5, i32 0, i32 1 + %12 = load { i64, %Callable* }*, { i64, %Callable* }** %11, align 8 + %13 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %12, i32 0, i32 1 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %14, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %14, i32 1) + %15 = 
bitcast { i64, %Callable* }* %12 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %15, i32 1) + %16 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %16, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %18 = icmp eq i64 %length, 0 + br i1 %18, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %19 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %20 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %22 = bitcast i8* %21 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %23 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %22, align 8 + %24 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %23, i32 0, i32 0 + %25 = load { %Array*, i64 }*, { %Array*, i64 }** %24, align 8 + %26 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %25, i32 0, i32 0 + %27 = load %Array*, %Array** %26, align 8 + %28 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %23, i32 0, i32 1 + %29 = load { i64, %Callable* }*, { i64, %Callable* }** %28, align 8 + %30 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %29, i32 0, i32 1 + %31 = load %Callable*, %Callable** %30, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 1) + %32 = bitcast { %Array*, i64 }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 1) + %33 = bitcast { i64, %Callable* }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 1) + %34 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %23 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 1) + %35 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* getelementptr ({ i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* null, i32 1) to i64)) + %36 = bitcast %Tuple* %35 to { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* + %37 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %36, i32 0, i32 0 + %38 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %36, i32 0, i32 1 + store i64 0, i64* %37, align 4 + store { { %Array*, i64 }*, { i64, %Callable* }* }* %23, { { %Array*, i64 }*, { i64, %Callable* }* }** %38, align 8 + %39 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* getelementptr ({ i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, 
%Callable* }* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %35, %Tuple* %39) + %first = bitcast %Tuple* %39 to { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* + %40 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %first, i32 0, i32 1 + %41 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %40, align 8 + %42 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %41, i32 0, i32 0 + %43 = load { %Array*, i64 }*, { %Array*, i64 }** %42, align 8 + %44 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %43, i32 0, i32 0 + %45 = load %Array*, %Array** %44, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %45, i32 1) + %46 = bitcast { %Array*, i64 }* %43 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %46, i32 1) + %47 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %41, i32 0, i32 1 + %48 = load { i64, %Callable* }*, { i64, %Callable* }** %47, align 8 + %49 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %48, i32 0, i32 1 + %50 = load %Callable*, %Callable** %49, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %50, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %50, i32 1) + %51 = bitcast { i64, %Callable* }* %48 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %51, i32 1) + %52 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %41 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %52, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %39, i32 1) + %53 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %54 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %55 = phi i64 [ 0, %then0__1 ], [ %71, %exiting__2 ] + %56 = icmp sle i64 %55, %20 + br i1 %56, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %57 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %55) + %58 = bitcast i8* %57 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %59 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %58, align 8 + %60 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %59, i32 0, i32 0 + %61 = load { %Array*, i64 }*, { %Array*, i64 }** %60, align 8 + %62 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %61, i32 0, i32 0 + %63 = load %Array*, %Array** %62, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %63, i32 -1) + %64 = bitcast { %Array*, i64 }* %61 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %64, i32 -1) + %65 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %59, i32 0, i32 1 + %66 = load { i64, %Callable* }*, { i64, %Callable* }** %65, align 8 + %67 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %66, i32 0, i32 1 + %68 = load %Callable*, %Callable** %67, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %68, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %68, i32 -1) + %69 = bitcast { i64, 
%Callable* }* %66 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %69, i32 -1) + %70 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %59 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %70, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %71 = add i64 %55, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %19 + +header__3: ; preds = %exiting__3, %continue__1 + %72 = phi i64 [ 0, %continue__1 ], [ %88, %exiting__3 ] + %73 = icmp sle i64 %72, %54 + br i1 %73, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %74 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 %72) + %75 = bitcast i8* %74 to { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }** + store { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %first, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }** %75, align 8 + %76 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %40, align 8 + %77 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %76, i32 0, i32 0 + %78 = load { %Array*, i64 }*, { %Array*, i64 }** %77, align 8 + %79 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %78, i32 0, i32 0 + %80 = load %Array*, %Array** %79, align 8 + %81 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %76, i32 0, i32 1 + %82 = load { i64, %Callable* }*, { i64, %Callable* }** %81, align 8 + %83 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %82, i32 0, i32 1 + %84 = load %Callable*, %Callable** %83, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %80, i32 1) + %85 = bitcast { %Array*, i64 }* %78 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %85, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %84, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %84, i32 1) + %86 = bitcast { i64, %Callable* }* %82 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %86, i32 1) + %87 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %76 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %87, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %39, i32 1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %88 = add i64 %72, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %53, %Array** %retval, align 8 + %89 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %90 = phi i64 [ 0, %exit__3 ], [ %109, %exiting__4 ] + %91 = icmp sle i64 %90, %89 + br i1 %91, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %92 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %53, i64 %90) + %93 = bitcast i8* %92 to { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }** + %94 = load { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }*, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }** %93, align 8 + %95 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %94, i32 0, i32 1 + %96 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %95, align 8 + %97 = 
getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %96, i32 0, i32 0 + %98 = load { %Array*, i64 }*, { %Array*, i64 }** %97, align 8 + %99 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %98, i32 0, i32 0 + %100 = load %Array*, %Array** %99, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %100, i32 1) + %101 = bitcast { %Array*, i64 }* %98 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %101, i32 1) + %102 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %96, i32 0, i32 1 + %103 = load { i64, %Callable* }*, { i64, %Callable* }** %102, align 8 + %104 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %103, i32 0, i32 1 + %105 = load %Callable*, %Callable** %104, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %105, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %105, i32 1) + %106 = bitcast { i64, %Callable* }* %103 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %106, i32 1) + %107 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %96 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %107, i32 1) + %108 = bitcast { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %94 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %108, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %109 = add i64 %90, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %53, i32 1) + %110 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %idx = phi i64 [ 1, %exit__4 ], [ %164, %exiting__5 ] + %111 = icmp sle i64 %idx, %110 + br i1 %111, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %112 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %112, i32 -1) + %113 = call %Array* @__quantum__rt__array_copy(%Array* %112, i1 false) + %114 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %115 = bitcast i8* %114 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %116 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %115, align 8 + %117 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %116, i32 0, i32 0 + %118 = load { %Array*, i64 }*, { %Array*, i64 }** %117, align 8 + %119 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %118, i32 0, i32 0 + %120 = load %Array*, %Array** %119, align 8 + %121 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %116, i32 0, i32 1 + %122 = load { i64, %Callable* }*, { i64, %Callable* }** %121, align 8 + %123 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %122, i32 0, i32 1 + %124 = load %Callable*, %Callable** %123, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 1) + %125 = bitcast { %Array*, i64 }* %118 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %125, i32 1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %124, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %124, i32 1) + %126 = bitcast { i64, %Callable* }* %122 to %Tuple* 
+ call void @__quantum__rt__tuple_update_reference_count(%Tuple* %126, i32 1) + %127 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %116 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %127, i32 1) + %128 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* getelementptr ({ i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* null, i32 1) to i64)) + %129 = bitcast %Tuple* %128 to { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* + %130 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %129, i32 0, i32 0 + %131 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %129, i32 0, i32 1 + store i64 %idx, i64* %130, align 4 + store { { %Array*, i64 }*, { i64, %Callable* }* }* %116, { { %Array*, i64 }*, { i64, %Callable* }* }** %131, align 8 + %132 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* getelementptr ({ i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %128, %Tuple* %132) + %133 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %113, i64 %idx) + %134 = bitcast i8* %133 to { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }** + %135 = bitcast %Tuple* %132 to { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* + %136 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %135, i32 0, i32 1 + %137 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %136, align 8 + %138 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %137, i32 0, i32 0 + %139 = load { %Array*, i64 }*, { %Array*, i64 }** %138, align 8 + %140 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %139, i32 0, i32 0 + %141 = load %Array*, %Array** %140, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %141, i32 1) + %142 = bitcast { %Array*, i64 }* %139 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %142, i32 1) + %143 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %137, i32 0, i32 1 + %144 = load { i64, %Callable* }*, { i64, %Callable* }** %143, align 8 + %145 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %144, i32 0, i32 1 + %146 = load %Callable*, %Callable** %145, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %146, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %146, i32 1) + %147 = bitcast { i64, %Callable* }* %144 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %147, i32 1) + %148 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %137 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %148, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %132, i32 1) + %149 = load { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }*, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }** %134, align 8 + %150 = getelementptr inbounds { i64, { 
{ %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %149, i32 0, i32 1 + %151 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %150, align 8 + %152 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %151, i32 0, i32 0 + %153 = load { %Array*, i64 }*, { %Array*, i64 }** %152, align 8 + %154 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %153, i32 0, i32 0 + %155 = load %Array*, %Array** %154, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %155, i32 -1) + %156 = bitcast { %Array*, i64 }* %153 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %156, i32 -1) + %157 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %151, i32 0, i32 1 + %158 = load { i64, %Callable* }*, { i64, %Callable* }** %157, align 8 + %159 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %158, i32 0, i32 1 + %160 = load %Callable*, %Callable** %159, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %160, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %160, i32 -1) + %161 = bitcast { i64, %Callable* }* %158 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %161, i32 -1) + %162 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %151 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %162, i32 -1) + %163 = bitcast { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %149 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %163, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %155, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %156, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %160, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %160, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %161, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %162, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %163, i32 -1) + store { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %135, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }** %134, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %113, i32 1) + store %Array* %113, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %112, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %120, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %125, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %124, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %124, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %126, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %127, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %128, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %164 = add i64 %idx, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + %165 = load %Array*, %Array** %retval, align 8 + %166 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %40, align 8 + %167 = getelementptr inbounds { { 
%Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %166, i32 0, i32 0 + %168 = load { %Array*, i64 }*, { %Array*, i64 }** %167, align 8 + %169 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %168, i32 0, i32 0 + %170 = load %Array*, %Array** %169, align 8 + %171 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %166, i32 0, i32 1 + %172 = load { i64, %Callable* }*, { i64, %Callable* }** %171, align 8 + %173 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %172, i32 0, i32 1 + %174 = load %Callable*, %Callable** %173, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %175 = sub i64 %length, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %176 = phi i64 [ 0, %exit__5 ], [ %192, %exiting__6 ] + %177 = icmp sle i64 %176, %175 + br i1 %177, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %178 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %176) + %179 = bitcast i8* %178 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %180 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %179, align 8 + %181 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %180, i32 0, i32 0 + %182 = load { %Array*, i64 }*, { %Array*, i64 }** %181, align 8 + %183 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %182, i32 0, i32 0 + %184 = load %Array*, %Array** %183, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %184, i32 -1) + %185 = bitcast { %Array*, i64 }* %182 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %185, i32 -1) + %186 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %180, i32 0, i32 1 + %187 = load { i64, %Callable* }*, { i64, %Callable* }** %186, align 8 + %188 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %187, i32 0, i32 1 + %189 = load %Callable*, %Callable** %188, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %189, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %189, i32 -1) + %190 = bitcast { i64, %Callable* }* %187 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %190, i32 -1) + %191 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %180 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %191, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %192 = add i64 %176, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %170, i32 -1) + %193 = bitcast { %Array*, i64 }* %168 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %193, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %174, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %174, i32 -1) + %194 = bitcast { i64, %Callable* }* %172 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %194, i32 -1) + %195 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %166 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %195, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %39, i32 -1) + %196 = call i64 @__quantum__rt__array_get_size_1d(%Array* %165) + %197 = sub i64 %196, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %198 = phi i64 [ 0, %exit__6 ], [ %217, %exiting__7 ] + %199 = icmp sle i64 %198, %197 + br i1 %199, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %200 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %165, i64 %198) + %201 = bitcast i8* %200 to { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }** + %202 = load { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }*, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }** %201, align 8 + %203 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %202, i32 0, i32 1 + %204 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %203, align 8 + %205 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %204, i32 0, i32 0 + %206 = load { %Array*, i64 }*, { %Array*, i64 }** %205, align 8 + %207 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %206, i32 0, i32 0 + %208 = load %Array*, %Array** %207, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %208, i32 -1) + %209 = bitcast { %Array*, i64 }* %206 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %209, i32 -1) + %210 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %204, i32 0, i32 1 + %211 = load { i64, %Callable* }*, { i64, %Callable* }** %210, align 8 + %212 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %211, i32 0, i32 1 + %213 = load %Callable*, %Callable** %212, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %213, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %213, i32 -1) + %214 = bitcast { i64, %Callable* }* %211 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %214, i32 -1) + %215 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %204 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %215, i32 -1) + %216 = bitcast { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %202 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %216, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %217 = add i64 %198, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %165, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %27, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %32, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %170, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %193, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* 
%174, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %174, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %194, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %195, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %39, i32 -1) + ret %Array* %165 +} + +define internal %Array* @Microsoft__Quantum__Arrays___593f6ec0c6174564a8ee8add732e267d_MappedByIndex__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to %Array** + %5 = load %Array*, %Array** %4, align 8 + %6 = call i64 @__quantum__rt__array_get_size_1d(%Array* %5) + %7 = sub i64 %6, 1 + br label %header__2 + +exiting__1: ; preds = %exit__2 + %8 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %9 = icmp eq i64 %length, 0 + br i1 %9, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %10 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %11 = sub i64 %length, 1 + br label %header__3 + +continue__1: ; preds = %exit__1 + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %13 = bitcast i8* %12 to %Array** + %14 = load %Array*, %Array** %13, align 8 + %15 = call i64 @__quantum__rt__array_get_size_1d(%Array* %14) + %16 = sub i64 %15, 1 + br label %header__5 + +header__2: ; preds = %exiting__2, %body__1 + %17 = phi i64 [ 0, %body__1 ], [ %33, %exiting__2 ] + %18 = icmp sle i64 %17, %7 + br i1 %18, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %5, i64 %17) + %20 = bitcast i8* %19 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %21 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %20, align 8 + %22 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %21, i32 0, i32 0 + %23 = load { %Array*, i64 }*, { %Array*, i64 }** %22, align 8 + %24 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %23, i32 0, i32 0 + %25 = load %Array*, %Array** %24, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %25, i32 1) + %26 = bitcast { %Array*, i64 }* %23 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %26, i32 1) + %27 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %21, i32 0, i32 1 + %28 = load { i64, %Callable* }*, { i64, %Callable* }** %27, align 8 + %29 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %28, i32 0, i32 1 + %30 = load %Callable*, %Callable** %29, align 8 + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %30, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %30, i32 1) + %31 = bitcast { i64, %Callable* }* %28 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %31, i32 1) + %32 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %21 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %32, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %33 = add i64 %17, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %5, i32 1) + br label %exiting__1 + +header__3: ; preds = %exiting__3, %then0__1 + %34 = phi i64 [ 0, %then0__1 ], [ %41, %exiting__3 ] + %35 = icmp sle i64 %34, %11 + br i1 %35, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %34) + %37 = bitcast i8* %36 to %Array** + %38 = load %Array*, %Array** %37, align 8 + %39 = call i64 @__quantum__rt__array_get_size_1d(%Array* %38) + %40 = sub i64 %39, 1 + br label %header__4 + +exiting__3: ; preds = %exit__4 + %41 = add i64 %34, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %10 + +header__4: ; preds = %exiting__4, %body__3 + %42 = phi i64 [ 0, %body__3 ], [ %58, %exiting__4 ] + %43 = icmp sle i64 %42, %40 + br i1 %43, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %38, i64 %42) + %45 = bitcast i8* %44 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %46 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %45, align 8 + %47 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %46, i32 0, i32 0 + %48 = load { %Array*, i64 }*, { %Array*, i64 }** %47, align 8 + %49 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %48, i32 0, i32 0 + %50 = load %Array*, %Array** %49, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %50, i32 -1) + %51 = bitcast { %Array*, i64 }* %48 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %51, i32 -1) + %52 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %46, i32 0, i32 1 + %53 = load { i64, %Callable* }*, { i64, %Callable* }** %52, align 8 + %54 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %53, i32 0, i32 1 + %55 = load %Callable*, %Callable** %54, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %55, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %55, i32 -1) + %56 = bitcast { i64, %Callable* }* %53 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 -1) + %57 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %42, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %38, i32 -1) + br label %exiting__3 + +header__5: ; preds = %exiting__5, %continue__1 + %59 = phi i64 [ 0, %continue__1 ], [ %75, %exiting__5 ] + %60 = icmp sle i64 %59, %16 + br i1 %60, label %body__5, label %exit__5 + +body__5: 
; preds = %header__5 + %61 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %14, i64 %59) + %62 = bitcast i8* %61 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %63 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %62, align 8 + %64 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %63, i32 0, i32 0 + %65 = load { %Array*, i64 }*, { %Array*, i64 }** %64, align 8 + %66 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %65, i32 0, i32 0 + %67 = load %Array*, %Array** %66, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %67, i32 1) + %68 = bitcast { %Array*, i64 }* %65 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %68, i32 1) + %69 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %63, i32 0, i32 1 + %70 = load { i64, %Callable* }*, { i64, %Callable* }** %69, align 8 + %71 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %70, i32 0, i32 1 + %72 = load %Callable*, %Callable** %71, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %72, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %72, i32 1) + %73 = bitcast { i64, %Callable* }* %70 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %73, i32 1) + %74 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %63 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %74, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %75 = add i64 %59, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 1) + %76 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %77 = bitcast %Tuple* %76 to { i64, %Array* }* + %78 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %77, i32 0, i32 0 + %79 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %77, i32 0, i32 1 + store i64 0, i64* %78, align 4 + store %Array* %14, %Array** %79, align 8 + %80 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %76, %Tuple* %80) + %first = bitcast %Tuple* %80 to { i64, %Array* }* + %81 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %first, i32 0, i32 1 + %82 = load %Array*, %Array** %81, align 8 + %83 = call i64 @__quantum__rt__array_get_size_1d(%Array* %82) + %84 = sub i64 %83, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %85 = phi i64 [ 0, %exit__5 ], [ %101, %exiting__6 ] + %86 = icmp sle i64 %85, %84 + br i1 %86, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %87 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %82, i64 %85) + %88 = bitcast i8* %87 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %89 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %88, align 8 + %90 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %89, i32 0, i32 0 + %91 = load { %Array*, i64 }*, { %Array*, i64 }** %90, align 8 + %92 = getelementptr inbounds { %Array*, i64 }, 
{ %Array*, i64 }* %91, i32 0, i32 0 + %93 = load %Array*, %Array** %92, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %93, i32 1) + %94 = bitcast { %Array*, i64 }* %91 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %94, i32 1) + %95 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %89, i32 0, i32 1 + %96 = load { i64, %Callable* }*, { i64, %Callable* }** %95, align 8 + %97 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %96, i32 0, i32 1 + %98 = load %Callable*, %Callable** %97, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %98, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %98, i32 1) + %99 = bitcast { i64, %Callable* }* %96 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %99, i32 1) + %100 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %89 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %100, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %101 = add i64 %85, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %82, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 1) + %102 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %103 = sub i64 %length, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %104 = phi i64 [ 0, %exit__6 ], [ %111, %exiting__7 ] + %105 = icmp sle i64 %104, %103 + br i1 %105, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %106 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %102, i64 %104) + %107 = bitcast i8* %106 to { i64, %Array* }** + store { i64, %Array* }* %first, { i64, %Array* }** %107, align 8 + %108 = load %Array*, %Array** %81, align 8 + %109 = call i64 @__quantum__rt__array_get_size_1d(%Array* %108) + %110 = sub i64 %109, 1 + br label %header__8 + +exiting__7: ; preds = %exit__8 + %111 = add i64 %104, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + store %Array* %102, %Array** %retval, align 8 + %112 = sub i64 %length, 1 + br label %header__9 + +header__8: ; preds = %exiting__8, %body__7 + %113 = phi i64 [ 0, %body__7 ], [ %129, %exiting__8 ] + %114 = icmp sle i64 %113, %110 + br i1 %114, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %115 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %108, i64 %113) + %116 = bitcast i8* %115 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %117 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %116, align 8 + %118 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %117, i32 0, i32 0 + %119 = load { %Array*, i64 }*, { %Array*, i64 }** %118, align 8 + %120 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %119, i32 0, i32 0 + %121 = load %Array*, %Array** %120, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %121, i32 1) + %122 = bitcast { %Array*, i64 }* %119 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %122, i32 1) + %123 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %117, i32 0, i32 1 + %124 = load { i64, %Callable* }*, { i64, %Callable* }** %123, align 8 + %125 = getelementptr inbounds { i64, %Callable* }, { 
i64, %Callable* }* %124, i32 0, i32 1 + %126 = load %Callable*, %Callable** %125, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %126, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %126, i32 1) + %127 = bitcast { i64, %Callable* }* %124 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %127, i32 1) + %128 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %117 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %128, i32 1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %129 = add i64 %113, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %108, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i32 1) + br label %exiting__7 + +header__9: ; preds = %exiting__9, %exit__7 + %130 = phi i64 [ 0, %exit__7 ], [ %139, %exiting__9 ] + %131 = icmp sle i64 %130, %112 + br i1 %131, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %132 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %102, i64 %130) + %133 = bitcast i8* %132 to { i64, %Array* }** + %134 = load { i64, %Array* }*, { i64, %Array* }** %133, align 8 + %135 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %134, i32 0, i32 1 + %136 = load %Array*, %Array** %135, align 8 + %137 = call i64 @__quantum__rt__array_get_size_1d(%Array* %136) + %138 = sub i64 %137, 1 + br label %header__10 + +exiting__9: ; preds = %exit__10 + %139 = add i64 %130, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %102, i32 1) + %140 = sub i64 %length, 1 + br label %header__11 + +header__10: ; preds = %exiting__10, %body__9 + %141 = phi i64 [ 0, %body__9 ], [ %157, %exiting__10 ] + %142 = icmp sle i64 %141, %138 + br i1 %142, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %143 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %136, i64 %141) + %144 = bitcast i8* %143 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %145 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %144, align 8 + %146 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %145, i32 0, i32 0 + %147 = load { %Array*, i64 }*, { %Array*, i64 }** %146, align 8 + %148 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %147, i32 0, i32 0 + %149 = load %Array*, %Array** %148, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %149, i32 1) + %150 = bitcast { %Array*, i64 }* %147 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %150, i32 1) + %151 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %145, i32 0, i32 1 + %152 = load { i64, %Callable* }*, { i64, %Callable* }** %151, align 8 + %153 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %152, i32 0, i32 1 + %154 = load %Callable*, %Callable** %153, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %154, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %154, i32 1) + %155 = bitcast { i64, %Callable* }* %152 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %155, i32 1) + %156 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %145 to %Tuple* + call void 
@__quantum__rt__tuple_update_alias_count(%Tuple* %156, i32 1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %157 = add i64 %141, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_alias_count(%Array* %136, i32 1) + %158 = bitcast { i64, %Array* }* %134 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %158, i32 1) + br label %exiting__9 + +header__11: ; preds = %exiting__11, %exit__9 + %idx = phi i64 [ 1, %exit__9 ], [ %167, %exiting__11 ] + %159 = icmp sle i64 %idx, %140 + br i1 %159, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %160 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %160, i32 -1) + %161 = call %Array* @__quantum__rt__array_copy(%Array* %160, i1 false) + %162 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %163 = bitcast i8* %162 to %Array** + %164 = load %Array*, %Array** %163, align 8 + %165 = call i64 @__quantum__rt__array_get_size_1d(%Array* %164) + %166 = sub i64 %165, 1 + br label %header__12 + +exiting__11: ; preds = %exit__16 + %167 = add i64 %idx, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + %168 = load %Array*, %Array** %retval, align 8 + %169 = load %Array*, %Array** %81, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %170 = sub i64 %length, 1 + br label %header__17 + +header__12: ; preds = %exiting__12, %body__11 + %171 = phi i64 [ 0, %body__11 ], [ %187, %exiting__12 ] + %172 = icmp sle i64 %171, %166 + br i1 %172, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %173 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %164, i64 %171) + %174 = bitcast i8* %173 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %175 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %174, align 8 + %176 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %175, i32 0, i32 0 + %177 = load { %Array*, i64 }*, { %Array*, i64 }** %176, align 8 + %178 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %177, i32 0, i32 0 + %179 = load %Array*, %Array** %178, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %179, i32 1) + %180 = bitcast { %Array*, i64 }* %177 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %180, i32 1) + %181 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %175, i32 0, i32 1 + %182 = load { i64, %Callable* }*, { i64, %Callable* }** %181, align 8 + %183 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %182, i32 0, i32 1 + %184 = load %Callable*, %Callable** %183, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %184, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %184, i32 1) + %185 = bitcast { i64, %Callable* }* %182 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %185, i32 1) + %186 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %175 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %186, i32 1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %187 = add i64 %171, 1 + br label %header__12 + +exit__12: ; preds = 
%header__12 + call void @__quantum__rt__array_update_reference_count(%Array* %164, i32 1) + %188 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + %189 = bitcast %Tuple* %188 to { i64, %Array* }* + %190 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %189, i32 0, i32 0 + %191 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %189, i32 0, i32 1 + store i64 %idx, i64* %190, align 4 + store %Array* %164, %Array** %191, align 8 + %192 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, %Array* }* getelementptr ({ i64, %Array* }, { i64, %Array* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %188, %Tuple* %192) + %193 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %161, i64 %idx) + %194 = bitcast i8* %193 to { i64, %Array* }** + %195 = bitcast %Tuple* %192 to { i64, %Array* }* + %196 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %195, i32 0, i32 1 + %197 = load %Array*, %Array** %196, align 8 + %198 = call i64 @__quantum__rt__array_get_size_1d(%Array* %197) + %199 = sub i64 %198, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %200 = phi i64 [ 0, %exit__12 ], [ %216, %exiting__13 ] + %201 = icmp sle i64 %200, %199 + br i1 %201, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %202 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %197, i64 %200) + %203 = bitcast i8* %202 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %204 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %203, align 8 + %205 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %204, i32 0, i32 0 + %206 = load { %Array*, i64 }*, { %Array*, i64 }** %205, align 8 + %207 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %206, i32 0, i32 0 + %208 = load %Array*, %Array** %207, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %208, i32 1) + %209 = bitcast { %Array*, i64 }* %206 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %209, i32 1) + %210 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %204, i32 0, i32 1 + %211 = load { i64, %Callable* }*, { i64, %Callable* }** %210, align 8 + %212 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %211, i32 0, i32 1 + %213 = load %Callable*, %Callable** %212, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %213, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %213, i32 1) + %214 = bitcast { i64, %Callable* }* %211 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %214, i32 1) + %215 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %204 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %215, i32 1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %216 = add i64 %200, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_alias_count(%Array* %197, i32 1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %192, i32 1) + %217 = load { i64, %Array* }*, { i64, %Array* }** %194, align 8 + %218 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %217, i32 0, i32 1 + %219 = load %Array*, %Array** %218, align 
8 + %220 = call i64 @__quantum__rt__array_get_size_1d(%Array* %219) + %221 = sub i64 %220, 1 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %222 = phi i64 [ 0, %exit__13 ], [ %238, %exiting__14 ] + %223 = icmp sle i64 %222, %221 + br i1 %223, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %224 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %219, i64 %222) + %225 = bitcast i8* %224 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %226 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %225, align 8 + %227 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %226, i32 0, i32 0 + %228 = load { %Array*, i64 }*, { %Array*, i64 }** %227, align 8 + %229 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %228, i32 0, i32 0 + %230 = load %Array*, %Array** %229, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %230, i32 -1) + %231 = bitcast { %Array*, i64 }* %228 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %231, i32 -1) + %232 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %226, i32 0, i32 1 + %233 = load { i64, %Callable* }*, { i64, %Callable* }** %232, align 8 + %234 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %233, i32 0, i32 1 + %235 = load %Callable*, %Callable** %234, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %235, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %235, i32 -1) + %236 = bitcast { i64, %Callable* }* %233 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %236, i32 -1) + %237 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %226 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %237, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %238 = add i64 %222, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_alias_count(%Array* %219, i32 -1) + %239 = bitcast { i64, %Array* }* %217 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %239, i32 -1) + %240 = sub i64 %220, 1 + br label %header__15 + +header__15: ; preds = %exiting__15, %exit__14 + %241 = phi i64 [ 0, %exit__14 ], [ %257, %exiting__15 ] + %242 = icmp sle i64 %241, %240 + br i1 %242, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %243 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %219, i64 %241) + %244 = bitcast i8* %243 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %245 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %244, align 8 + %246 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %245, i32 0, i32 0 + %247 = load { %Array*, i64 }*, { %Array*, i64 }** %246, align 8 + %248 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %247, i32 0, i32 0 + %249 = load %Array*, %Array** %248, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %249, i32 -1) + %250 = bitcast { %Array*, i64 }* %247 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %250, i32 -1) + %251 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %245, i32 0, i32 1 + %252 = load 
{ i64, %Callable* }*, { i64, %Callable* }** %251, align 8 + %253 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %252, i32 0, i32 1 + %254 = load %Callable*, %Callable** %253, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %254, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %254, i32 -1) + %255 = bitcast { i64, %Callable* }* %252 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %255, i32 -1) + %256 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %245 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %256, i32 -1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %257 = add i64 %241, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_reference_count(%Array* %219, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %239, i32 -1) + store { i64, %Array* }* %195, { i64, %Array* }** %194, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %161, i32 1) + store %Array* %161, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %160, i32 -1) + %258 = sub i64 %165, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %259 = phi i64 [ 0, %exit__15 ], [ %275, %exiting__16 ] + %260 = icmp sle i64 %259, %258 + br i1 %260, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %261 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %164, i64 %259) + %262 = bitcast i8* %261 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %263 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %262, align 8 + %264 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %263, i32 0, i32 0 + %265 = load { %Array*, i64 }*, { %Array*, i64 }** %264, align 8 + %266 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %265, i32 0, i32 0 + %267 = load %Array*, %Array** %266, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %267, i32 -1) + %268 = bitcast { %Array*, i64 }* %265 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %268, i32 -1) + %269 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %263, i32 0, i32 1 + %270 = load { i64, %Callable* }*, { i64, %Callable* }** %269, align 8 + %271 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %270, i32 0, i32 1 + %272 = load %Callable*, %Callable** %271, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %272, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %272, i32 -1) + %273 = bitcast { i64, %Callable* }* %270 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %273, i32 -1) + %274 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %263 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %274, i32 -1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %275 = add i64 %259, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_reference_count(%Array* %164, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %188, i32 -1) + br label %exiting__11 + +header__17: ; preds = %exiting__17, %exit__11 + %276 = phi i64 [ 0, %exit__11 ], [ 
%283, %exiting__17 ] + %277 = icmp sle i64 %276, %170 + br i1 %277, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %278 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %276) + %279 = bitcast i8* %278 to %Array** + %280 = load %Array*, %Array** %279, align 8 + %281 = call i64 @__quantum__rt__array_get_size_1d(%Array* %280) + %282 = sub i64 %281, 1 + br label %header__18 + +exiting__17: ; preds = %exit__18 + %283 = add i64 %276, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + %284 = call i64 @__quantum__rt__array_get_size_1d(%Array* %169) + %285 = sub i64 %284, 1 + br label %header__19 + +header__18: ; preds = %exiting__18, %body__17 + %286 = phi i64 [ 0, %body__17 ], [ %302, %exiting__18 ] + %287 = icmp sle i64 %286, %282 + br i1 %287, label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %288 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %280, i64 %286) + %289 = bitcast i8* %288 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %290 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %289, align 8 + %291 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %290, i32 0, i32 0 + %292 = load { %Array*, i64 }*, { %Array*, i64 }** %291, align 8 + %293 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %292, i32 0, i32 0 + %294 = load %Array*, %Array** %293, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %294, i32 -1) + %295 = bitcast { %Array*, i64 }* %292 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %295, i32 -1) + %296 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %290, i32 0, i32 1 + %297 = load { i64, %Callable* }*, { i64, %Callable* }** %296, align 8 + %298 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %297, i32 0, i32 1 + %299 = load %Callable*, %Callable** %298, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %299, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %299, i32 -1) + %300 = bitcast { i64, %Callable* }* %297 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %300, i32 -1) + %301 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %290 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %301, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = %body__18 + %302 = add i64 %286, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_alias_count(%Array* %280, i32 -1) + br label %exiting__17 + +header__19: ; preds = %exiting__19, %exit__17 + %303 = phi i64 [ 0, %exit__17 ], [ %319, %exiting__19 ] + %304 = icmp sle i64 %303, %285 + br i1 %304, label %body__19, label %exit__19 + +body__19: ; preds = %header__19 + %305 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %169, i64 %303) + %306 = bitcast i8* %305 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %307 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %306, align 8 + %308 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %307, i32 0, i32 0 + %309 = load { %Array*, i64 }*, { %Array*, i64 }** %308, align 8 + %310 = getelementptr inbounds { 
%Array*, i64 }, { %Array*, i64 }* %309, i32 0, i32 0 + %311 = load %Array*, %Array** %310, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %311, i32 -1) + %312 = bitcast { %Array*, i64 }* %309 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %312, i32 -1) + %313 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %307, i32 0, i32 1 + %314 = load { i64, %Callable* }*, { i64, %Callable* }** %313, align 8 + %315 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %314, i32 0, i32 1 + %316 = load %Callable*, %Callable** %315, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %316, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %316, i32 -1) + %317 = bitcast { i64, %Callable* }* %314 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %317, i32 -1) + %318 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %307 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %318, i32 -1) + br label %exiting__19 + +exiting__19: ; preds = %body__19 + %319 = add i64 %303, 1 + br label %header__19 + +exit__19: ; preds = %header__19 + call void @__quantum__rt__array_update_alias_count(%Array* %169, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %80, i32 -1) + %320 = call i64 @__quantum__rt__array_get_size_1d(%Array* %168) + %321 = sub i64 %320, 1 + br label %header__20 + +header__20: ; preds = %exiting__20, %exit__19 + %322 = phi i64 [ 0, %exit__19 ], [ %331, %exiting__20 ] + %323 = icmp sle i64 %322, %321 + br i1 %323, label %body__20, label %exit__20 + +body__20: ; preds = %header__20 + %324 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %168, i64 %322) + %325 = bitcast i8* %324 to { i64, %Array* }** + %326 = load { i64, %Array* }*, { i64, %Array* }** %325, align 8 + %327 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %326, i32 0, i32 1 + %328 = load %Array*, %Array** %327, align 8 + %329 = call i64 @__quantum__rt__array_get_size_1d(%Array* %328) + %330 = sub i64 %329, 1 + br label %header__21 + +exiting__20: ; preds = %exit__21 + %331 = add i64 %322, 1 + br label %header__20 + +exit__20: ; preds = %header__20 + call void @__quantum__rt__array_update_alias_count(%Array* %168, i32 -1) + %332 = sub i64 %15, 1 + br label %header__22 + +header__21: ; preds = %exiting__21, %body__20 + %333 = phi i64 [ 0, %body__20 ], [ %349, %exiting__21 ] + %334 = icmp sle i64 %333, %330 + br i1 %334, label %body__21, label %exit__21 + +body__21: ; preds = %header__21 + %335 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %328, i64 %333) + %336 = bitcast i8* %335 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %337 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %336, align 8 + %338 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %337, i32 0, i32 0 + %339 = load { %Array*, i64 }*, { %Array*, i64 }** %338, align 8 + %340 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %339, i32 0, i32 0 + %341 = load %Array*, %Array** %340, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %341, i32 -1) + %342 = bitcast { %Array*, i64 }* %339 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %342, i32 -1) + %343 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { 
i64, %Callable* }* }* %337, i32 0, i32 1 + %344 = load { i64, %Callable* }*, { i64, %Callable* }** %343, align 8 + %345 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %344, i32 0, i32 1 + %346 = load %Callable*, %Callable** %345, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %346, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %346, i32 -1) + %347 = bitcast { i64, %Callable* }* %344 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %347, i32 -1) + %348 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %337 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %348, i32 -1) + br label %exiting__21 + +exiting__21: ; preds = %body__21 + %349 = add i64 %333, 1 + br label %header__21 + +exit__21: ; preds = %header__21 + call void @__quantum__rt__array_update_alias_count(%Array* %328, i32 -1) + %350 = bitcast { i64, %Array* }* %326 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %350, i32 -1) + br label %exiting__20 + +header__22: ; preds = %exiting__22, %exit__20 + %351 = phi i64 [ 0, %exit__20 ], [ %367, %exiting__22 ] + %352 = icmp sle i64 %351, %332 + br i1 %352, label %body__22, label %exit__22 + +body__22: ; preds = %header__22 + %353 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %14, i64 %351) + %354 = bitcast i8* %353 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %355 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %354, align 8 + %356 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %355, i32 0, i32 0 + %357 = load { %Array*, i64 }*, { %Array*, i64 }** %356, align 8 + %358 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %357, i32 0, i32 0 + %359 = load %Array*, %Array** %358, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %359, i32 -1) + %360 = bitcast { %Array*, i64 }* %357 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %360, i32 -1) + %361 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %355, i32 0, i32 1 + %362 = load { i64, %Callable* }*, { i64, %Callable* }** %361, align 8 + %363 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %362, i32 0, i32 1 + %364 = load %Callable*, %Callable** %363, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %364, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %364, i32 -1) + %365 = bitcast { i64, %Callable* }* %362 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %365, i32 -1) + %366 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %355 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %366, i32 -1) + br label %exiting__22 + +exiting__22: ; preds = %body__22 + %367 = add i64 %351, 1 + br label %header__22 + +exit__22: ; preds = %header__22 + call void @__quantum__rt__array_update_reference_count(%Array* %14, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %76, i32 -1) + %368 = sub i64 %284, 1 + br label %header__23 + +header__23: ; preds = %exiting__23, %exit__22 + %369 = phi i64 [ 0, %exit__22 ], [ %385, %exiting__23 ] + %370 = icmp sle i64 %369, %368 + br i1 %370, label %body__23, label %exit__23 + +body__23: ; preds = %header__23 + %371 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %169, i64 %369) + %372 = bitcast i8* %371 to { { %Array*, i64 }*, { i64, %Callable* }* }** + %373 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %372, align 8 + %374 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %373, i32 0, i32 0 + %375 = load { %Array*, i64 }*, { %Array*, i64 }** %374, align 8 + %376 = getelementptr inbounds { %Array*, i64 }, { %Array*, i64 }* %375, i32 0, i32 0 + %377 = load %Array*, %Array** %376, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %377, i32 -1) + %378 = bitcast { %Array*, i64 }* %375 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %378, i32 -1) + %379 = getelementptr inbounds { { %Array*, i64 }*, { i64, %Callable* }* }, { { %Array*, i64 }*, { i64, %Callable* }* }* %373, i32 0, i32 1 + %380 = load { i64, %Callable* }*, { i64, %Callable* }** %379, align 8 + %381 = getelementptr inbounds { i64, %Callable* }, { i64, %Callable* }* %380, i32 0, i32 1 + %382 = load %Callable*, %Callable** %381, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %382, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %382, i32 -1) + %383 = bitcast { i64, %Callable* }* %380 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %383, i32 -1) + %384 = bitcast { { %Array*, i64 }*, { i64, %Callable* }* }* %373 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %384, i32 -1) + br label %exiting__23 + +exiting__23: ; preds = %body__23 + %385 = add i64 %369, 1 + br label %header__23 + +exit__23: ; preds = %header__23 + call void @__quantum__rt__array_update_reference_count(%Array* %169, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %80, i32 -1) + ret %Array* %168 +} + +define internal %Array* @Microsoft__Quantum__Arrays___5ef583d7ebd84277a2b7db5af95f2088_Enumerated__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Arrays___81b2e45870f04b54ac181661cda83d5d___QsRef1__Identity____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___0642fb9fda514da39c521340e3041e14_MappedByIndex__body(%Callable* %0, %Array* %array) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + ret %Array* %1 +} + +define internal void @Microsoft__Quantum__Arrays___81b2e45870f04b54ac181661cda83d5d___QsRef1__Identity____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, i1 }* + %1 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %0, i32 0, i32 1 + %3 = load i64, i64* %1, align 4 + %4 = load i1, i1* %2, align 1 + %5 = call { i64, i1 }* @Microsoft__Quantum__Arrays___81b2e45870f04b54ac181661cda83d5d___QsRef1__Identity____body(i64 %3, i1 %4) + %6 = bitcast %Tuple* %result-tuple to { i64, i1 }* + %7 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, i1 
}, { i64, i1 }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %5, i32 0, i32 0 + %10 = load i64, i64* %9, align 4 + store i64 %10, i64* %7, align 4 + %11 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %5, i32 0, i32 1 + %12 = load i1, i1* %11, align 1 + store i1 %12, i1* %8, align 1 + %13 = bitcast { i64, i1 }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Arrays___6ed5375d64984881b234f01e25bc55b9___QsRef1__Identity____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, double }* + %1 = getelementptr inbounds { i64, double }, { i64, double }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, double }, { i64, double }* %0, i32 0, i32 1 + %3 = load i64, i64* %1, align 4 + %4 = load double, double* %2, align 8 + %5 = call { i64, double }* @Microsoft__Quantum__Arrays___6ed5375d64984881b234f01e25bc55b9___QsRef1__Identity____body(i64 %3, double %4) + %6 = bitcast %Tuple* %result-tuple to { i64, double }* + %7 = getelementptr inbounds { i64, double }, { i64, double }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, double }, { i64, double }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { i64, double }, { i64, double }* %5, i32 0, i32 0 + %10 = load i64, i64* %9, align 4 + store i64 %10, i64* %7, align 4 + %11 = getelementptr inbounds { i64, double }, { i64, double }* %5, i32 0, i32 1 + %12 = load double, double* %11, align 8 + store double %12, double* %8, align 8 + %13 = bitcast { i64, double }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Arrays___b8c470817e3c4d54a387b72f70fe0572___QsRef1__Identity____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, { double, double }* }* + %1 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %0, i32 0, i32 1 + %3 = load i64, i64* %1, align 4 + %4 = load { double, double }*, { double, double }** %2, align 8 + %5 = call { i64, { double, double }* }* @Microsoft__Quantum__Arrays___b8c470817e3c4d54a387b72f70fe0572___QsRef1__Identity____body(i64 %3, { double, double }* %4) + %6 = bitcast %Tuple* %result-tuple to { i64, { double, double }* }* + %7 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %5, i32 0, i32 0 + %10 = load i64, i64* %9, align 4 + store i64 %10, i64* %7, align 4 + %11 = getelementptr inbounds { i64, { double, double }* }, { i64, { double, double }* }* %5, i32 0, i32 1 + %12 = load { double, double }*, { double, double }** %11, align 8 + store { double, double }* %12, { double, double }** %8, align 8 + %13 = bitcast { i64, { double, double }* }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Arrays___9e4eb8c66a5d41c0ab661fccd1f15c41___QsRef1__Identity____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 
= bitcast %Tuple* %arg-tuple to { i64, %Array* }* + %1 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %0, i32 0, i32 1 + %3 = load i64, i64* %1, align 4 + %4 = load %Array*, %Array** %2, align 8 + %5 = call { i64, %Array* }* @Microsoft__Quantum__Arrays___9e4eb8c66a5d41c0ab661fccd1f15c41___QsRef1__Identity____body(i64 %3, %Array* %4) + %6 = bitcast %Tuple* %result-tuple to { i64, %Array* }* + %7 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %5, i32 0, i32 0 + %10 = load i64, i64* %9, align 4 + store i64 %10, i64* %7, align 4 + %11 = getelementptr inbounds { i64, %Array* }, { i64, %Array* }* %5, i32 0, i32 1 + %12 = load %Array*, %Array** %11, align 8 + store %Array* %12, %Array** %8, align 8 + %13 = bitcast { i64, %Array* }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Arrays___d0d4b543e4084f10a022319d0e6d7887___QsRef1__Identity____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* + %1 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %0, i32 0, i32 1 + %3 = load i64, i64* %1, align 4 + %4 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %2, align 8 + %5 = call { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* @Microsoft__Quantum__Arrays___d0d4b543e4084f10a022319d0e6d7887___QsRef1__Identity____body(i64 %3, { { %Array*, i64 }*, { i64, %Callable* }* }* %4) + %6 = bitcast %Tuple* %result-tuple to { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* + %7 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %5, i32 0, i32 0 + %10 = load i64, i64* %9, align 4 + store i64 %10, i64* %7, align 4 + %11 = getelementptr inbounds { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }, { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %5, i32 0, i32 1 + %12 = load { { %Array*, i64 }*, { i64, %Callable* }* }*, { { %Array*, i64 }*, { i64, %Callable* }* }** %11, align 8 + store { { %Array*, i64 }*, { i64, %Callable* }* }* %12, { { %Array*, i64 }*, { i64, %Callable* }* }** %8, align 8 + %13 = bitcast { i64, { { %Array*, i64 }*, { i64, %Callable* }* }* }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %13, i32 -1) + ret void +} + +define internal i1 @Microsoft__Quantum__Arrays___e14e05cbd7674cf99f7174e4f55f22e1_Fold__body(%Callable* %folder, i1 %state, %Array* %array) { +entry: + %current = alloca i1, align 1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %folder, 
i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %folder, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + store i1 %state, i1* %current, align 1 + %0 = call %Range @Microsoft__Quantum__Arrays___e16320bd27aa426885e6375e173405df_IndexRange__body(%Array* %array) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %4 = icmp sgt i64 %2, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxElement = phi i64 [ %1, %preheader__1 ], [ %20, %exiting__1 ] + %5 = icmp sle i64 %idxElement, %3 + %6 = icmp sge i64 %idxElement, %3 + %7 = select i1 %4, i1 %5, i1 %6 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = load i1, i1* %current, align 1 + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idxElement) + %10 = bitcast i8* %9 to i1* + %11 = load i1, i1* %10, align 1 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, i1 }* getelementptr ({ i1, i1 }, { i1, i1 }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { i1, i1 }* + %14 = getelementptr inbounds { i1, i1 }, { i1, i1 }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { i1, i1 }, { i1, i1 }* %13, i32 0, i32 1 + store i1 %8, i1* %14, align 1 + store i1 %11, i1* %15, align 1 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1 }* getelementptr ({ i1 }, { i1 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %folder, %Tuple* %12, %Tuple* %16) + %17 = bitcast %Tuple* %16 to { i1 }* + %18 = getelementptr inbounds { i1 }, { i1 }* %17, i32 0, i32 0 + %19 = load i1, i1* %18, align 1 + store i1 %19, i1* %current, align 1 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %20 = add i64 %idxElement, %2 + br label %header__1 + +exit__1: ; preds = %header__1 + %21 = load i1, i1* %current, align 1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %folder, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %folder, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret i1 %21 +} + +define internal void @Microsoft__Quantum__Logical__And__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i1, i1 }* + %1 = getelementptr inbounds { i1, i1 }, { i1, i1 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i1, i1 }, { i1, i1 }* %0, i32 0, i32 1 + %3 = load i1, i1* %1, align 1 + %4 = load i1, i1* %2, align 1 + %5 = call i1 @Microsoft__Quantum__Logical__And__body(i1 %3, i1 %4) + %6 = bitcast %Tuple* %result-tuple to { i1 }* + %7 = getelementptr inbounds { i1 }, { i1 }* %6, i32 0, i32 0 + store i1 %5, i1* %7, align 1 + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___83850f08600e4e54b1fe3e670f742428_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = 
phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to { double, double }** + %5 = load { double, double }*, { double, double }** %4, align 8 + %6 = bitcast { double, double }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %8 = icmp eq i64 %length, 0 + br i1 %8, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %9 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %10 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %12 = bitcast i8* %11 to { double, double }** + %13 = load { double, double }*, { double, double }** %12, align 8 + %14 = bitcast { double, double }* %13 to %Tuple* + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1 }* getelementptr ({ i1 }, { i1 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %14, %Tuple* %15) + %16 = bitcast %Tuple* %15 to { i1 }* + %17 = getelementptr inbounds { i1 }, { i1 }* %16, i32 0, i32 0 + %first = load i1, i1* %17, align 1 + %18 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %length) + %19 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %20 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__2 ] + %21 = icmp sle i64 %20, %10 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %20) + %23 = bitcast i8* %22 to { double, double }** + %24 = load { double, double }*, { double, double }** %23, align 8 + %25 = bitcast { double, double }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %9 + +header__3: ; preds = %exiting__3, %continue__1 + %27 = phi i64 [ 0, %continue__1 ], [ %31, %exiting__3 ] + %28 = icmp sle i64 %27, %19 + br i1 %28, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %27) + %30 = bitcast i8* %29 to i1* + store i1 %first, i1* %30, align 1 + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %31 = add i64 %27, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %18, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + %32 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %idx = phi i64 [ 1, %exit__3 ], [ %46, %exiting__4 ] + %33 = icmp sle i64 %idx, %32 + br i1 %33, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %34 = load %Array*, %Array** %retval, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + %35 = call %Array* @__quantum__rt__array_copy(%Array* %34, i1 false) + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %37 = bitcast i8* %36 to { double, double }** + %38 = load { double, double }*, { double, double }** %37, align 8 + %39 = bitcast { double, double }* %38 to %Tuple* + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1 }* getelementptr ({ i1 }, { i1 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %39, %Tuple* %40) + %41 = bitcast %Tuple* %40 to { i1 }* + %42 = getelementptr inbounds { i1 }, { i1 }* %41, i32 0, i32 0 + %43 = load i1, i1* %42, align 1 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %35, i64 %idx) + %45 = bitcast i8* %44 to i1* + store i1 %43, i1* %45, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %35, i32 1) + store %Array* %35, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %46 = add i64 %idx, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + %47 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %48 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %49 = phi i64 [ 0, %exit__4 ], [ %55, %exiting__5 ] + %50 = icmp sle i64 %49, %48 + br i1 %50, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %49) + %52 = bitcast i8* %51 to { double, double }** + %53 = load { double, double }*, { double, double }** %52, align 8 + %54 = bitcast { double, double }* %53 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %54, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %55 = add i64 %49, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + ret %Array* %47 +} + +define internal i1 @Microsoft__Quantum__Logical__And__body(i1 %a, i1 %b) { +entry: + %0 = and i1 %a, %b + ret i1 %0 +} + +define internal void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %actual, %String* %message) { +entry: + %0 = xor i1 %actual, true + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__string_update_reference_count(%String* %message, i32 1) + call void @__quantum__rt__fail(%String* %message) + unreachable + +continue__1: ; preds = %entry + ret void +} + +define internal i64 @Microsoft__Quantum__Math__MinI__body(i64 %a, i64 %b) { +entry: + %0 = icmp slt i64 %a, %b + %1 = select i1 %0, i64 %a, i64 %b + ret i64 %1 +} + +define internal %Array* @Microsoft__Quantum__Arrays___6c6f349b2d0c4e67b944e93ebc590a5b_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 
@__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to { i64, i1 }** + %5 = load { i64, i1 }*, { i64, i1 }** %4, align 8 + %6 = bitcast { i64, i1 }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %8 = icmp eq i64 %length, 0 + br i1 %8, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %9 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %10 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %12 = bitcast i8* %11 to { i64, i1 }** + %13 = load { i64, i1 }*, { i64, i1 }** %12, align 8 + %14 = bitcast { i64, i1 }* %13 to %Tuple* + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %14, %Tuple* %15) + %16 = bitcast %Tuple* %15 to { i64 }* + %17 = getelementptr inbounds { i64 }, { i64 }* %16, i32 0, i32 0 + %first = load i64, i64* %17, align 4 + %18 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %19 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %20 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__2 ] + %21 = icmp sle i64 %20, %10 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %20) + %23 = bitcast i8* %22 to { i64, i1 }** + %24 = load { i64, i1 }*, { i64, i1 }** %23, align 8 + %25 = bitcast { i64, i1 }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %9 + +header__3: ; preds = %exiting__3, %continue__1 + %27 = phi i64 [ 0, %continue__1 ], [ %31, %exiting__3 ] + %28 = icmp sle i64 %27, %19 + br i1 %28, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %27) + %30 = bitcast i8* %29 to i64* + store i64 %first, i64* %30, align 4 + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %31 = add i64 %27, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %18, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + %32 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %idx = phi i64 [ 1, %exit__3 ], [ %46, %exiting__4 ] + %33 = icmp sle i64 %idx, %32 + br i1 %33, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %34 = load 
%Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + %35 = call %Array* @__quantum__rt__array_copy(%Array* %34, i1 false) + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %37 = bitcast i8* %36 to { i64, i1 }** + %38 = load { i64, i1 }*, { i64, i1 }** %37, align 8 + %39 = bitcast { i64, i1 }* %38 to %Tuple* + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64 }* getelementptr ({ i64 }, { i64 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %39, %Tuple* %40) + %41 = bitcast %Tuple* %40 to { i64 }* + %42 = getelementptr inbounds { i64 }, { i64 }* %41, i32 0, i32 0 + %43 = load i64, i64* %42, align 4 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %35, i64 %idx) + %45 = bitcast i8* %44 to i64* + store i64 %43, i64* %45, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %35, i32 1) + store %Array* %35, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %46 = add i64 %idx, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + %47 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %48 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %49 = phi i64 [ 0, %exit__4 ], [ %55, %exiting__5 ] + %50 = icmp sle i64 %49, %48 + br i1 %50, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %49) + %52 = bitcast i8* %51 to { i64, i1 }** + %53 = load { i64, i1 }*, { i64, i1 }** %52, align 8 + %54 = bitcast { i64, i1 }* %53 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %54, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %55 = add i64 %49, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + ret %Array* %47 +} + +define internal void @Microsoft__Quantum__Canon___facc0657b0284c16ae2c0d999b143be0_Fst__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, i1 }* + %1 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %0, i32 0, i32 1 + %3 = load i64, i64* %1, align 4 + %4 = load i1, i1* %2, align 1 + %5 = call i64 @Microsoft__Quantum__Canon___facc0657b0284c16ae2c0d999b143be0_Fst__body(i64 %3, i1 %4) + %6 = bitcast %Tuple* %result-tuple to { i64 }* + %7 = getelementptr inbounds { i64 }, { i64 }* %6, i32 0, i32 0 + store i64 %5, i64* %7, align 4 + ret void +} + +define internal void @Microsoft__Quantum__Canon___bfd0dc2872b54301bd24b64a2c23e89e_Snd__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i64, i1 }* + %1 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i64, i1 }, { i64, i1 }* %0, i32 
0, i32 1 + %3 = load i64, i64* %1, align 4 + %4 = load i1, i1* %2, align 1 + %5 = call i1 @Microsoft__Quantum__Canon___bfd0dc2872b54301bd24b64a2c23e89e_Snd__body(i64 %3, i1 %4) + %6 = bitcast %Tuple* %result-tuple to { i1 }* + %7 = getelementptr inbounds { i1 }, { i1 }* %6, i32 0, i32 0 + store i1 %5, i1* %7, align 1 + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___847843f64261497cbcdec42c04c12cc1_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %1 = phi i64 [ 0, %entry ], [ %7, %exiting__1 ] + %2 = icmp sle i64 %1, %0 + br i1 %2, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %1) + %4 = bitcast i8* %3 to { i64, i64 }** + %5 = load { i64, i64 }*, { i64, i64 }** %4, align 8 + %6 = bitcast { i64, i64 }* %5 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %6, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %7 = add i64 %1, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %8 = icmp eq i64 %length, 0 + br i1 %8, label %then0__1, label %continue__1 + +then0__1: ; preds = %exit__1 + %9 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %10 = sub i64 %length, 1 + br label %header__2 + +continue__1: ; preds = %exit__1 + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %12 = bitcast i8* %11 to { i64, i64 }** + %13 = load { i64, i64 }*, { i64, i64 }** %12, align 8 + %14 = bitcast { i64, i64 }* %13 to %Tuple* + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1 }* getelementptr ({ i1 }, { i1 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %14, %Tuple* %15) + %16 = bitcast %Tuple* %15 to { i1 }* + %17 = getelementptr inbounds { i1 }, { i1 }* %16, i32 0, i32 0 + %first = load i1, i1* %17, align 1 + %18 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %length) + %19 = sub i64 %length, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %then0__1 + %20 = phi i64 [ 0, %then0__1 ], [ %26, %exiting__2 ] + %21 = icmp sle i64 %20, %10 + br i1 %21, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %20) + %23 = bitcast i8* %22 to { i64, i64 }** + %24 = load { i64, i64 }*, { i64, i64 }** %23, align 8 + %25 = bitcast { i64, i64 }* %24 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %26 = add i64 %20, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %9 + +header__3: ; preds = %exiting__3, %continue__1 + %27 = phi i64 [ 0, %continue__1 ], [ %31, %exiting__3 ] + %28 = icmp sle i64 %27, %19 + br i1 %28, label %body__3, label %exit__3 + 
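+; (annotation) body__3 seeds every slot of the newly created output array with the first mapped value; the header__4 loop below then overwrites indices 1..length-1 with mapper(array[idx]), calling __quantum__rt__array_copy (copy-on-write) before each store so an aliased array is never mutated in place.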
+body__3: ; preds = %header__3 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %27) + %30 = bitcast i8* %29 to i1* + store i1 %first, i1* %30, align 1 + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %31 = add i64 %27, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %18, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + %32 = sub i64 %length, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %idx = phi i64 [ 1, %exit__3 ], [ %46, %exiting__4 ] + %33 = icmp sle i64 %idx, %32 + br i1 %33, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %34 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %34, i32 -1) + %35 = call %Array* @__quantum__rt__array_copy(%Array* %34, i1 false) + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %37 = bitcast i8* %36 to { i64, i64 }** + %38 = load { i64, i64 }*, { i64, i64 }** %37, align 8 + %39 = bitcast { i64, i64 }* %38 to %Tuple* + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1 }* getelementptr ({ i1 }, { i1 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %39, %Tuple* %40) + %41 = bitcast %Tuple* %40 to { i1 }* + %42 = getelementptr inbounds { i1 }, { i1 }* %41, i32 0, i32 0 + %43 = load i1, i1* %42, align 1 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %35, i64 %idx) + %45 = bitcast i8* %44 to i1* + store i1 %43, i1* %45, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %35, i32 1) + store %Array* %35, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %34, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %40, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %46 = add i64 %idx, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + %47 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + %48 = sub i64 %length, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %49 = phi i64 [ 0, %exit__4 ], [ %55, %exiting__5 ] + %50 = icmp sle i64 %49, %48 + br i1 %50, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %49) + %52 = bitcast i8* %51 to { i64, i64 }** + %53 = load { i64, i64 }*, { i64, i64 }** %52, align 8 + %54 = bitcast { i64, i64 }* %53 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %54, i32 -1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %55 = add i64 %49, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + ret %Array* %47 +} + +define internal %Array* @Microsoft__Quantum__Arrays___cc24b2dc7eb146c6a86121e0aab81fa7_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %0 = call i64 
@__quantum__rt__array_get_size_1d(%Array* %left) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %2 = icmp slt i64 %0, %1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = icmp eq i64 %nElements, 0 + br i1 %3, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %4 + +continue__1: ; preds = %condContinue__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %6 = bitcast i8* %5 to i1* + %7 = load i1, i1* %6, align 1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, %Qubit* }* getelementptr ({ i1, %Qubit* }, { i1, %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i1, %Qubit* }* + %13 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %12, i32 0, i32 1 + store i1 %7, i1* %13, align 1 + store %Qubit* %10, %Qubit** %14, align 8 + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %16 = sub i64 %nElements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %17 = phi i64 [ 0, %continue__1 ], [ %21, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %17) + %20 = bitcast i8* %19 to { i1, %Qubit* }** + store { i1, %Qubit* }* %12, { i1, %Qubit* }** %20, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %15, %Array** %output, align 8 + %22 = sub i64 %nElements, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %23) + %26 = bitcast i8* %25 to { i1, %Qubit* }** + %27 = load { i1, %Qubit* }*, { i1, %Qubit* }** %26, align 8 + %28 = bitcast { i1, %Qubit* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %30 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %48, %exiting__3 ] + %31 = icmp sle i64 %idxElement, %30 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %32 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call 
%Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %35 = bitcast i8* %34 to i1* + %36 = load i1, i1* %35, align 1 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %38 = bitcast i8* %37 to %Qubit** + %39 = load %Qubit*, %Qubit** %38, align 8 + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, %Qubit* }* getelementptr ({ i1, %Qubit* }, { i1, %Qubit* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i1, %Qubit* }* + %42 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i1, %Qubit* }, { i1, %Qubit* }* %41, i32 0, i32 1 + store i1 %36, i1* %42, align 1 + store %Qubit* %39, %Qubit** %43, align 8 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxElement) + %45 = bitcast i8* %44 to { i1, %Qubit* }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %46 = load { i1, %Qubit* }*, { i1, %Qubit* }** %45, align 8 + %47 = bitcast { i1, %Qubit* }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { i1, %Qubit* }* %41, { i1, %Qubit* }** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { i1, %Qubit* }** + %56 = load { i1, %Qubit* }*, { i1, %Qubit* }** %55, align 8 + %57 = bitcast { i1, %Qubit* }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret %Array* %49 +} + +define internal %Array* @Microsoft__Quantum__Arrays___b419e961477e441ea98f00f19ccb1574_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = icmp eq i64 %length, 0 + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %1 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 0) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %1 + +continue__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %3 = bitcast i8* %2 to i1* + %4 = load i1, i1* %3, align 1 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1 }* getelementptr ({ i1 }, { i1 }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { i1 }* + %7 = getelementptr inbounds { i1 }, { i1 }* %6, i32 0, i32 0 + store i1 %4, i1* %7, align 1 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1 }* getelementptr ({ i1 }, { i1 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %5, %Tuple* %8) + %9 = bitcast %Tuple* %8 to { i1 }* + %10 = getelementptr inbounds { i1 }, { i1 }* %9, i32 0, i32 0 + %first = load i1, i1* %10, align 1 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %length) + %12 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %13 = phi i64 [ 0, %continue__1 ], [ %17, %exiting__1 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %11, i64 %13) + %16 = bitcast i8* %15 to i1* + store i1 %first, i1* %16, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %13, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %11, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %18 = sub i64 %length, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idx = phi i64 [ 1, %exit__1 ], [ %34, %exiting__2 ] + %19 = icmp sle i64 %idx, %18 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1) + %21 = call %Array* @__quantum__rt__array_copy(%Array* %20, i1 false) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %23 = bitcast i8* %22 to i1* + %24 = load i1, i1* %23, align 1 + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1 }* getelementptr ({ i1 }, { i1 }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { i1 }* + %27 = getelementptr inbounds { i1 }, { i1 }* %26, i32 0, i32 0 + store i1 %24, i1* %27, align 1 + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1 }* getelementptr ({ i1 }, { i1 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %25, %Tuple* %28) + %29 = bitcast %Tuple* %28 to { i1 }* + %30 = getelementptr inbounds { i1 }, { i1 }* %29, i32 0, i32 0 + %31 = load i1, i1* %30, align 1 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %idx) + %33 = bitcast i8* %32 to i1* + store i1 %31, i1* %33, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 1) + store %Array* %21, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %34 = add i64 
%idx, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %35 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %35, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret %Array* %35 +} + +define internal { i64, double, i1 }* @Microsoft__Quantum__Math____QsRef2__ExtendedTruncation____body(double %value) { +entry: + %truncated = fptosi double %value to i64 + %0 = sitofp i64 %truncated to double + %1 = fsub double %0, %value + %2 = fcmp oge double %value, 0.000000e+00 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i64, double, i1 }* getelementptr ({ i64, double, i1 }, { i64, double, i1 }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i64, double, i1 }* + %5 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i64, double, i1 }, { i64, double, i1 }* %4, i32 0, i32 2 + store i64 %truncated, i64* %5, align 4 + store double %1, double* %6, align 8 + store i1 %2, i1* %7, align 1 + ret { i64, double, i1 }* %4 +} + +define internal double @Microsoft__Quantum__Math__ArcTan__body(double %d) { +entry: + %0 = call double @__quantum__qis__arctan__body(double %d) + ret double %0 +} + +define internal double @Microsoft__Quantum__Math__ArcTan2__body(double %y, double %x) { +entry: + %0 = call double @__quantum__qis__arctan2__body(double %y, double %x) + ret double %0 +} + +declare double @__quantum__qis__arctan2__body(double, double) + +define internal double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* %input) { +entry: + %0 = bitcast { double, double }* %input to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = getelementptr inbounds { double, double }, { double, double }* %input, i32 0, i32 1 + %2 = load double, double* %1, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + ret double %2 +} + +declare double @__quantum__qis__log__body(double) + +define internal double @Microsoft__Quantum__Math__LogOf2__body() { +entry: + ret double 0x3FE62E42FEFA39EF +} + +define internal double @Microsoft__Quantum__Math__Log__body(double %input) { +entry: + %0 = call double @__quantum__qis__log__body(double %input) + ret double %0 +} + +define internal double @Microsoft__Quantum__Math__Sqrt__body(double %d) { +entry: + %0 = call double @__quantum__qis__sqrt__body(double %d) + ret double %0 +} + +declare double @__quantum__qis__sqrt__body(double) + +define internal void @Microsoft__Quantum__Diagnostics___b01e27bf91bd4e748af9bc2d289c8960___QsRef1__FormattedFailure____body(i1 %actual, i1 %expected, %String* %message) { +entry: + %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @21, i32 0, i32 0)) + %1 = call %String* @__quantum__rt__string_concatenate(%String* %0, %String* %message) + %2 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void 
@__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @22, i32 0, i32 0)) + %4 = call %String* @__quantum__rt__string_concatenate(%String* %2, %String* %3) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + br i1 %expected, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %5 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @23, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %entry + %6 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @24, i32 0, i32 0)) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %7 = phi %String* [ %5, %condTrue__1 ], [ %6, %condFalse__1 ] + %8 = call %String* @__quantum__rt__string_concatenate(%String* %4, %String* %7) + call void @__quantum__rt__string_update_reference_count(%String* %4, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + %9 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @25, i32 0, i32 0)) + %10 = call %String* @__quantum__rt__string_concatenate(%String* %8, %String* %9) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1) + br i1 %actual, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condContinue__1 + %11 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @23, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condContinue__1 + %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @24, i32 0, i32 0)) + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condTrue__2 + %13 = phi %String* [ %11, %condTrue__2 ], [ %12, %condFalse__2 ] + %14 = call %String* @__quantum__rt__string_concatenate(%String* %10, %String* %13) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + call void @__quantum__rt__fail(%String* %14) + unreachable +} + +define internal %Array* @Microsoft__Quantum__Convert__IntAsBoolArray__body(i64 %number, i64 %bits) { +entry: + %tempInt = alloca i64, align 8 + %outputBits = alloca %Array*, align 8 + %0 = icmp sge i64 %bits, 0 + br i1 %0, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %entry + %1 = icmp sle i64 %bits, 63 + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %entry + %2 = phi i1 [ %1, %condTrue__1 ], [ %0, %entry ] + %3 = trunc i64 %bits to i32 + %4 = call double @llvm.powi.f64.i32(double 2.000000e+00, i32 %3) + %5 = fptosi double %4 to i64 + %6 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([33 x i8], [33 x i8]* @26, i32 0, i32 0)) + %7 = call %String* @__quantum__rt__int_to_string(i64 %5) + %8 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %7) + call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %2, %String* %8) + %9 = icmp slt i64 
%bits, 63 + br i1 %9, label %condTrue__2, label %condFalse__1 + +condTrue__2: ; preds = %condContinue__1 + %10 = shl i64 1, %bits + br label %condContinue__2 + +condFalse__1: ; preds = %condContinue__1 + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__1, %condTrue__2 + %max = phi i64 [ %10, %condTrue__2 ], [ 9223372036854775807, %condFalse__1 ] + %11 = icmp sge i64 %number, 0 + br i1 %11, label %condTrue__3, label %condContinue__3 + +condTrue__3: ; preds = %condContinue__2 + %12 = icmp sle i64 %number, %max + br label %condContinue__3 + +condContinue__3: ; preds = %condTrue__3, %condContinue__2 + %13 = phi i1 [ %12, %condTrue__3 ], [ %11, %condContinue__2 ] + %14 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([34 x i8], [34 x i8]* @27, i32 0, i32 0)) + %15 = call %String* @__quantum__rt__int_to_string(i64 %bits) + %16 = call %String* @__quantum__rt__string_concatenate(%String* %14, %String* %15) + call void @__quantum__rt__string_update_reference_count(%String* %14, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + %17 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([15 x i8], [15 x i8]* @28, i32 0, i32 0)) + %18 = call %String* @__quantum__rt__string_concatenate(%String* %16, %String* %17) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %17, i32 -1) + %19 = call %String* @__quantum__rt__int_to_string(i64 %number) + %20 = call %String* @__quantum__rt__string_concatenate(%String* %18, %String* %19) + call void @__quantum__rt__string_update_reference_count(%String* %18, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %19, i32 -1) + %21 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @3, i32 0, i32 0)) + %22 = call %String* @__quantum__rt__string_concatenate(%String* %20, %String* %21) + call void @__quantum__rt__string_update_reference_count(%String* %20, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %21, i32 -1) + call void @Microsoft__Quantum__Diagnostics__Fact__body(i1 %13, %String* %22) + %23 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %bits) + %24 = sub i64 %bits, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %condContinue__3 + %25 = phi i64 [ 0, %condContinue__3 ], [ %29, %exiting__1 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %23, i64 %25) + %28 = bitcast i8* %27 to i1* + store i1 false, i1* %28, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %29 = add i64 %25, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %23, %Array** %outputBits, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %23, i32 1) + store i64 %number, i64* %tempInt, align 4 + %30 = sub i64 %bits, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idxBit = phi i64 [ 0, %exit__1 ], [ %41, %exiting__2 ] + %31 = icmp sle i64 %idxBit, %30 + br i1 %31, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %32 = load %Array*, %Array** %outputBits, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = load i64, i64* %tempInt, align 4 + %35 = srem i64 %34, 2 + 
%36 = icmp eq i64 %35, 0 + %37 = select i1 %36, i1 false, i1 true + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxBit) + %39 = bitcast i8* %38 to i1* + store i1 %37, i1* %39, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %outputBits, align 8 + %40 = sdiv i64 %34, 2 + store i64 %40, i64* %tempInt, align 4 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %41 = add i64 %idxBit, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %42 = load %Array*, %Array** %outputBits, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %42, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %22, i32 -1) + ret %Array* %42 +} + +define internal i1 @Microsoft__Quantum__Logical__Not__body(i1 %value) { +entry: + %0 = xor i1 %value, true + ret i1 %0 +} + +define internal { double, double }* @Microsoft__Quantum__Optimization____QsRef1__NextProbes____body(double %left, double %right) { +entry: + %0 = call double @__quantum__qis__sqrt__body(double 5.000000e+00) + %1 = fadd double %0, 1.000000e+00 + %goldenRatio = fdiv double %1, 2.000000e+00 + %2 = call double @Microsoft__Quantum__Optimization____QsRef1__Width____body(double %left, double %right) + %delta = fdiv double %2, %goldenRatio + %3 = fsub double %right, %delta + %4 = fadd double %left, %delta + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { double, double }* + %7 = getelementptr inbounds { double, double }, { double, double }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { double, double }, { double, double }* %6, i32 0, i32 1 + store double %3, double* %7, align 8 + store double %4, double* %8, align 8 + ret { double, double }* %6 +} + +define internal double @Microsoft__Quantum__Optimization____QsRef1__Width____body(double %left, double %right) { +entry: + %0 = fsub double %right, %left + ret double %0 +} + +define internal { double, double }* @Microsoft__Quantum__Optimization____QsRef1__ProbeValue____body(%Callable* %fn, double %coord) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %fn, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %fn, i32 1) + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { double }* + %2 = getelementptr inbounds { double }, { double }* %1, i32 0, i32 0 + store double %coord, double* %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double }* getelementptr ({ double }, { double }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %fn, %Tuple* %0, %Tuple* %3) + %4 = bitcast %Tuple* %3 to { double }* + %5 = getelementptr inbounds { double }, { double }* %4, i32 0, i32 0 + %6 = load double, double* %5, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %0, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { double, double }* + 
%9 = getelementptr inbounds { double, double }, { double, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { double, double }, { double, double }* %8, i32 0, i32 1 + store double %coord, double* %9, align 8 + store double %6, double* %10, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %fn, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %fn, i32 -1) + ret { double, double }* %8 +} + +define internal { double, double, i64 }* @Microsoft__Quantum__Optimization__UnivariateOptimizationResult__body(double %Coordinate, double %Value, i64 %NQueries) { +entry: + %0 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double, i64 }* getelementptr ({ double, double, i64 }, { double, double, i64 }* null, i32 1) to i64)) + %1 = bitcast %Tuple* %0 to { double, double, i64 }* + %2 = getelementptr inbounds { double, double, i64 }, { double, double, i64 }* %1, i32 0, i32 0 + %3 = getelementptr inbounds { double, double, i64 }, { double, double, i64 }* %1, i32 0, i32 1 + %4 = getelementptr inbounds { double, double, i64 }, { double, double, i64 }* %1, i32 0, i32 2 + store double %Coordinate, double* %2, align 8 + store double %Value, double* %3, align 8 + store i64 %NQueries, i64* %4, align 4 + ret { double, double, i64 }* %1 +} + +define internal void @Microsoft__Quantum__Intrinsic__X__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Logical__Not__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i1 }* + %1 = getelementptr inbounds { i1 }, { i1 }* %0, i32 0, i32 0 + %2 = load i1, i1* %1, align 1 + %3 = call i1 
@Microsoft__Quantum__Logical__Not__body(i1 %2) + %4 = bitcast %Tuple* %result-tuple to { i1 }* + %5 = getelementptr inbounds { i1 }, { i1 }* %4, i32 0, i32 0 + store i1 %3, i1* %5, align 1 + ret void +} + +declare void @__quantum__qis__z__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyGlobalRotationStep____body(double %angle, i64 %idxTarget, %Array* %register) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 0, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %3 = bitcast i8* %2 to %Qubit** + %4 = load %Qubit*, %Qubit** %3, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %6 = bitcast i8* %5 to %Qubit** + store %Qubit* %4, %Qubit** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %angle, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyGlobalRotationStep____adj(double %angle, i64 %idxTarget, %Array* %register) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 0, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %3 = bitcast i8* %2 to %Qubit** + %4 = load %Qubit*, %Qubit** %3, align 8 + 
%qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %6 = bitcast i8* %5 to %Qubit** + store %Qubit* %4, %Qubit** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %angle, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyGlobalRotationStep____ctl(%Array* %__controlQubits__, { double, i64, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 0 + %angle = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2 + %register = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %5 = bitcast i8* %4 to i2* + store i2 0, i2* %5, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %10 = bitcast i8* %9 to %Qubit** + store %Qubit* %8, %Qubit** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Array*, double, %Array* }* + %13 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 2 + store %Array* %paulis, %Array** %13, align 8 + store double %angle, double* %14, align 8 + store %Array* %qubits, %Array** %15, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %12) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* 
%paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyGlobalRotationStep____ctladj(%Array* %__controlQubits__, { double, i64, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 0 + %angle = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %2, align 4 + %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2 + %register = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %paulis = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %paulis, i64 0) + %5 = bitcast i8* %4 to i2* + store i2 0, i2* %5, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %7 = bitcast i8* %6 to %Qubit** + %8 = load %Qubit*, %Qubit** %7, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %10 = bitcast i8* %9 to %Qubit** + store %Qubit* %8, %Qubit** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Array*, double, %Array* }* + %13 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 1 + %15 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %12, i32 0, i32 2 + store %Array* %paulis, %Array** %13, align 8 + store double %angle, double* %14, align 8 + store %Array* %qubits, %Array** %15, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %12) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____body(double %tolerance, %Array* %disentangling, i2 %axis, { %Range, i64 }* %0, %Array* %register) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %1 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 0 + %rngControl = load %Range, %Range* %1, align 4 + %2 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %2, align 4 + %3 = extractvalue %Range %rngControl, 0 + %4 = extractvalue %Range %rngControl, 1 + %5 = extractvalue %Range %rngControl, 2 + %6 = insertvalue %Range zeroinitializer, i64 %3, 0 + %7 = insertvalue %Range %6, i64 %4, 1 + %8 = insertvalue %Range %7, i64 %5, 2 + %9 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %8, i1 true) + %actualControl = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %9) + %10 = getelementptr inbounds { %Array* }, { %Array* }* %actualControl, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { %Array* }* %actualControl to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %14 = bitcast i8* %13 to %Qubit** + %15 = load %Qubit*, %Qubit** %14, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__body(double %tolerance, %Array* %disentangling, i2 %axis, { %Array* }* %actualControl, %Qubit* %15) + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____adj(double %tolerance, %Array* %disentangling, i2 %axis, { %Range, i64 }* %0, %Array* %register) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %1 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 0 + %rngControl = load %Range, %Range* %1, align 4 + %2 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %2, align 4 + %3 = extractvalue %Range %rngControl, 0 + %4 = extractvalue 
%Range %rngControl, 1 + %5 = extractvalue %Range %rngControl, 2 + %6 = insertvalue %Range zeroinitializer, i64 %3, 0 + %7 = insertvalue %Range %6, i64 %4, 1 + %8 = insertvalue %Range %7, i64 %5, 2 + %9 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %8, i1 true) + %__qsVar0__actualControl__ = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %9) + %10 = getelementptr inbounds { %Array* }, { %Array* }* %__qsVar0__actualControl__, i32 0, i32 0 + %11 = load %Array*, %Array** %10, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %12 = bitcast { %Array* }* %__qsVar0__actualControl__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 1) + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %14 = bitcast i8* %13 to %Qubit** + %15 = load %Qubit*, %Qubit** %14, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__adj(double %tolerance, %Array* %disentangling, i2 %axis, { %Array* }* %__qsVar0__actualControl__, %Qubit* %15) + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____ctl(%Array* %__controlQubits__, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1 + %disentangling = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1) + %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2 + %axis = load i2, i2* %3, align 1 + %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3 + %5 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8 + %6 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4 + %register = load %Array*, %Array** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %7 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 0 + %rngControl = load %Range, %Range* %7, align 4 + %8 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 1 + %idxTarget = load i64, i64* %8, align 4 + %9 = extractvalue %Range %rngControl, 0 + %10 = extractvalue %Range %rngControl, 1 + %11 = extractvalue %Range %rngControl, 2 + %12 = insertvalue %Range zeroinitializer, i64 %9, 0 + %13 = 
insertvalue %Range %12, i64 %10, 1 + %14 = insertvalue %Range %13, i64 %11, 2 + %15 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %14, i1 true) + %actualControl = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %15) + %16 = getelementptr inbounds { %Array* }, { %Array* }* %actualControl, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array* }* %actualControl to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 1) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %20 = bitcast i8* %19 to %Qubit** + %21 = load %Qubit*, %Qubit** %20, align 8 + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %23 = bitcast %Tuple* %22 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %24 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 0 + %25 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 1 + %26 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 2 + %27 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 3 + %28 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 4 + store double %tolerance, double* %24, align 8 + store %Array* %disentangling, %Array** %25, align 8 + store i2 %axis, i2* %26, align 1 + store { %Array* }* %actualControl, { %Array* }** %27, align 8 + store %Qubit* %21, %Qubit** %28, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctl(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____ctladj(%Array* 
%__controlQubits__, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0 + %tolerance = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1 + %disentangling = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 1) + %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2 + %axis = load i2, i2* %3, align 1 + %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3 + %5 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8 + %6 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4 + %register = load %Array*, %Array** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %7 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 0 + %rngControl = load %Range, %Range* %7, align 4 + %8 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %5, i32 0, i32 1 + %idxTarget = load i64, i64* %8, align 4 + %9 = extractvalue %Range %rngControl, 0 + %10 = extractvalue %Range %rngControl, 1 + %11 = extractvalue %Range %rngControl, 2 + %12 = insertvalue %Range zeroinitializer, i64 %9, 0 + %13 = insertvalue %Range %12, i64 %10, 1 + %14 = insertvalue %Range %13, i64 %11, 2 + %15 = call %Array* @__quantum__rt__array_slice_1d(%Array* %register, %Range %14, i1 true) + %__qsVar0__actualControl__ = call { %Array* }* @Microsoft__Quantum__Arithmetic__LittleEndian__body(%Array* %15) + %16 = getelementptr inbounds { %Array* }, { %Array* }* %__qsVar0__actualControl__, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 1) + %18 = bitcast { %Array* }* %__qsVar0__actualControl__ to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 1) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxTarget) + %20 = bitcast i8* %19 to %Qubit** + %21 = load %Qubit*, %Qubit** %20, align 8 + %22 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Array* }*, %Qubit* }* getelementptr ({ double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* null, i32 1) to i64)) + %23 = bitcast %Tuple* %22 to { double, %Array*, i2, { %Array* }*, %Qubit* }* + %24 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 0 + %25 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 1 + %26 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { 
%Array* }*, %Qubit* }* %23, i32 0, i32 2 + %27 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 3 + %28 = getelementptr inbounds { double, %Array*, i2, { %Array* }*, %Qubit* }, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23, i32 0, i32 4 + store double %tolerance, double* %24, align 8 + store %Array* %disentangling, %Array** %25, align 8 + store i2 %axis, i2* %26, align 1 + store { %Array* }* %__qsVar0__actualControl__, { %Array* }** %27, align 8 + store %Qubit* %21, %Qubit** %28, align 8 + call void @Microsoft__Quantum__Canon__ApproximatelyMultiplexPauli__ctladj(%Array* %__controlQubits__, { double, %Array*, i2, { %Array* }*, %Qubit* }* %23) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %15, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentangling, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %17, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %22, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyToLittleEndian____body(%Callable* %bareOp, { %Array* }* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0 + %1 = load %Array*, %Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %register to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Array* }* + %5 = getelementptr inbounds { %Array* }, { %Array* }* %4, i32 0, i32 0 + store %Array* %1, %Array** %5, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %bareOp, %Tuple* %3, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyToLittleEndian____adj(%Callable* %bareOp, { %Array* }* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1) + %0 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0 + %1 = load %Array*, 
%Array** %0, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 1) + %2 = bitcast { %Array* }* %register to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 1) + %3 = call %Callable* @__quantum__rt__callable_copy(%Callable* %bareOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %3) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array* }* getelementptr ({ %Array* }, { %Array* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + store %Array* %1, %Array** %6, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %3, %Tuple* %4, %Tuple* null) + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %1, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyToLittleEndian____ctl(%Array* %__controlQubits__, { %Callable*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0 + %bareOp = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1) + %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1 + %register = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %register to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %bareOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %9, align 8 + store %Array* %4, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void 
@__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyToLittleEndian____ctladj(%Array* %__controlQubits__, { %Callable*, { %Array* }* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0 + %bareOp = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 1) + %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1 + %register = load { %Array* }*, { %Array* }** %2, align 8 + %3 = getelementptr inbounds { %Array* }, { %Array* }* %register, i32 0, i32 0 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 1) + %5 = bitcast { %Array* }* %register to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 1) + %6 = call %Callable* @__quantum__rt__callable_copy(%Callable* %bareOp, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %6) + call void @__quantum__rt__callable_make_controlled(%Callable* %6) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array* }* getelementptr ({ %Array*, %Array* }, { %Array*, %Array* }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array* }* + %9 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %8, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %9, align 8 + store %Array* %4, %Array** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %6, %Tuple* %7, %Tuple* null) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %bareOp, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void 
@__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +define internal %Array* @Microsoft__Quantum__Preparation____QsRef1__ApproximatelyUnprepareArbitraryStatePlan____body(double %tolerance, %Array* %coefficients, { %Range, i64 }* %0) { +entry: + %plan = alloca %Array*, align 8 + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %3) + %6 = bitcast i8* %5 to { double, double }** + %7 = load { double, double }*, { double, double }** %6, align 8 + %8 = bitcast { double, double }* %7 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %8, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %10 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 0 + %rngControl = load %Range, %Range* %10, align 4 + %11 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %0, i32 0, i32 1 + %idxTarget = load i64, i64* %11, align 4 + %12 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + store %Array* %12, %Array** %plan, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 1) + %13 = call { %Array*, %Array*, %Array* }* @Microsoft__Quantum__Preparation____QsRef1__StatePreparationSBMComputeCoefficients____body(%Array* %coefficients) + %14 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %13, i32 0, i32 0 + %disentanglingY = load %Array*, %Array** %14, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingY, i32 1) + %15 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %13, i32 0, i32 1 + %disentanglingZ = load %Array*, %Array** %15, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingZ, i32 1) + %16 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %13, i32 0, i32 2 + %newCoefficients = load %Array*, %Array** %16, align 8 + %17 = call i64 @__quantum__rt__array_get_size_1d(%Array* %newCoefficients) + %18 = sub i64 %17, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %19 = phi i64 [ 0, %exit__1 ], [ %25, %exiting__2 ] + %20 = icmp sle i64 %19, %18 + br i1 %20, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 %19) + %22 = bitcast i8* %21 to { double, double }** + %23 = load { double, double }*, { double, double }** %22, align 8 + %24 = bitcast { double, double }* %23 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %24, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %25 = add i64 %19, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %newCoefficients, i32 1) + %26 = call i1 @Microsoft__Quantum__Canon____QsRef1__AnyOutsideToleranceD____body(double %tolerance, %Array* %disentanglingZ) + br i1 %26, label %then0__1, label 
%continue__1 + +then0__1: ; preds = %exit__2 + %27 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingZ, i32 1) + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, %Range, i64 }* getelementptr ({ %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { %Callable*, double, %Array*, i2, %Range, i64 }* + %30 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 1 + %32 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 2 + %33 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 3 + %34 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 4 + %35 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %29, i32 0, i32 5 + store %Callable* %27, %Callable** %30, align 8 + store double %tolerance, double* %31, align 8 + store %Array* %disentanglingZ, %Array** %32, align 8 + store i2 -2, i2* %33, align 1 + store %Range %rngControl, %Range* %34, align 4 + store i64 %idxTarget, i64* %35, align 4 + %36 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__32__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__20__FunctionTable, %Tuple* %28) + %37 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 0) + %39 = bitcast i8* %38 to %Callable** + store %Callable* %36, %Callable** %39, align 8 + %40 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %40, i64 0) + %42 = bitcast i8* %41 to %Callable** + store %Callable* %36, %Callable** %42, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %36, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %36, i32 1) + br label %header__3 + +continue__1: ; preds = %exit__4, %exit__2 + %43 = call i1 @Microsoft__Quantum__Canon____QsRef1__AnyOutsideToleranceD____body(double %tolerance, %Array* %disentanglingY) + br i1 %43, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + %44 = load %Array*, %Array** %plan, align 8 + %45 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingY, i32 1) + %46 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, %Array*, i2, %Range, i64 }* getelementptr ({ %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, 
%Array*, i2, %Range, i64 }* null, i32 1) to i64)) + %47 = bitcast %Tuple* %46 to { %Callable*, double, %Array*, i2, %Range, i64 }* + %48 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 0 + %49 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 1 + %50 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 2 + %51 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 3 + %52 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 4 + %53 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %47, i32 0, i32 5 + store %Callable* %45, %Callable** %48, align 8 + store double %tolerance, double* %49, align 8 + store %Array* %disentanglingY, %Array** %50, align 8 + store i2 -1, i2* %51, align 1 + store %Range %rngControl, %Range* %52, align 4 + store i64 %idxTarget, i64* %53, align 4 + %54 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__33__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__20__FunctionTable, %Tuple* %46) + %55 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %56 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 0) + %57 = bitcast i8* %56 to %Callable** + store %Callable* %54, %Callable** %57, align 8 + %58 = call %Array* @__quantum__rt__array_concatenate(%Array* %44, %Array* %55) + %59 = call i64 @__quantum__rt__array_get_size_1d(%Array* %58) + %60 = sub i64 %59, 1 + br label %header__5 + +continue__2: ; preds = %exit__9, %continue__1 + %61 = call i1 @Microsoft__Quantum__Canon__IsRangeEmpty__body(%Range %rngControl) + br i1 %61, label %then0__3, label %test1__1 + +then0__3: ; preds = %continue__2 + %62 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 0) + %63 = bitcast i8* %62 to { double, double }** + %64 = load { double, double }*, { double, double }** %63, align 8 + %65 = getelementptr inbounds { double, double }, { double, double }* %64, i32 0, i32 0 + %abs = load double, double* %65, align 8 + %66 = getelementptr inbounds { double, double }, { double, double }* %64, i32 0, i32 1 + %arg = load double, double* %66, align 8 + %67 = call double @Microsoft__Quantum__Math__AbsD__body(double %arg) + %68 = fcmp ogt double %67, %tolerance + br i1 %68, label %then0__4, label %continue__4 + +then0__4: ; preds = %then0__3 + %69 = load %Array*, %Array** %plan, align 8 + %70 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Preparation____QsRef1__ApplyGlobalRotationStep____FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %71 = fmul double -1.000000e+00, %arg + %72 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double, i64 }* getelementptr ({ %Callable*, double, i64 }, { %Callable*, double, i64 }* null, i32 1) to i64)) + %73 = bitcast %Tuple* %72 to { %Callable*, double, i64 }* + %74 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %73, i32 0, i32 0 + %75 = getelementptr inbounds { %Callable*, double, i64 }, { 
%Callable*, double, i64 }* %73, i32 0, i32 1 + %76 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %73, i32 0, i32 2 + store %Callable* %70, %Callable** %74, align 8 + store double %71, double* %75, align 8 + store i64 %idxTarget, i64* %76, align 4 + %77 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__34__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__21__FunctionTable, %Tuple* %72) + %78 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %79 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 0) + %80 = bitcast i8* %79 to %Callable** + store %Callable* %77, %Callable** %80, align 8 + %81 = call %Array* @__quantum__rt__array_concatenate(%Array* %69, %Array* %78) + %82 = call i64 @__quantum__rt__array_get_size_1d(%Array* %81) + %83 = sub i64 %82, 1 + br label %header__10 + +continue__4: ; preds = %exit__14, %then0__3 + br label %continue__3 + +test1__1: ; preds = %continue__2 + %84 = call i1 @Microsoft__Quantum__Canon____QsRef1__AnyOutsideToleranceCP____body(double %tolerance, %Array* %newCoefficients) + br i1 %84, label %then1__1, label %continue__3 + +then1__1: ; preds = %test1__1 + %85 = extractvalue %Range %rngControl, 0 + %86 = extractvalue %Range %rngControl, 1 + %87 = extractvalue %Range %rngControl, 2 + %88 = add i64 %85, 1 + %89 = extractvalue %Range %rngControl, 0 + %90 = extractvalue %Range %rngControl, 1 + %91 = extractvalue %Range %rngControl, 2 + %92 = extractvalue %Range %rngControl, 0 + %93 = extractvalue %Range %rngControl, 1 + %94 = extractvalue %Range %rngControl, 2 + %95 = insertvalue %Range zeroinitializer, i64 %88, 0 + %96 = insertvalue %Range %95, i64 %90, 1 + %newControl = insertvalue %Range %96, i64 %94, 2 + %newTarget = extractvalue %Range %rngControl, 0 + %97 = extractvalue %Range %rngControl, 1 + %98 = extractvalue %Range %rngControl, 2 + %99 = load %Array*, %Array** %plan, align 8 + %100 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %101 = bitcast %Tuple* %100 to { %Range, i64 }* + %102 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %101, i32 0, i32 0 + %103 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %101, i32 0, i32 1 + store %Range %newControl, %Range* %102, align 4 + store i64 %newTarget, i64* %103, align 4 + %104 = call %Array* @Microsoft__Quantum__Preparation____QsRef1__ApproximatelyUnprepareArbitraryStatePlan____body(double %tolerance, %Array* %newCoefficients, { %Range, i64 }* %101) + %105 = call %Array* @__quantum__rt__array_concatenate(%Array* %99, %Array* %104) + %106 = call i64 @__quantum__rt__array_get_size_1d(%Array* %105) + %107 = sub i64 %106, 1 + br label %header__15 + +continue__3: ; preds = %exit__19, %test1__1, %continue__4 + %108 = load %Array*, %Array** %plan, align 8 + %109 = sub i64 %1, 1 + br label %header__20 + +header__3: ; preds = %exiting__3, %then0__1 + %110 = phi i64 [ 0, %then0__1 ], [ %115, %exiting__3 ] + %111 = icmp sle i64 %110, 0 + br i1 %111, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %112 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %40, i64 %110) + %113 = bitcast i8* %112 to %Callable** + %114 = load %Callable*, %Callable** %113, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %114, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %114, i32 1) + br label %exiting__3 + 
+exiting__3: ; preds = %body__3 + %115 = add i64 %110, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %40, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %12, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %12, i32 -1) + store %Array* %40, %Array** %plan, align 8 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %116 = phi i64 [ 0, %exit__3 ], [ %121, %exiting__4 ] + %117 = icmp sle i64 %116, 0 + br i1 %117, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %37, i64 %116) + %119 = bitcast i8* %118 to %Callable** + %120 = load %Callable*, %Callable** %119, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %120, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %120, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %121 = add i64 %116, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %37, i32 -1) + br label %continue__1 + +header__5: ; preds = %exiting__5, %then0__2 + %122 = phi i64 [ 0, %then0__2 ], [ %127, %exiting__5 ] + %123 = icmp sle i64 %122, %60 + br i1 %123, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %124 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %58, i64 %122) + %125 = bitcast i8* %124 to %Callable** + %126 = load %Callable*, %Callable** %125, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %126, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %126, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %127 = add i64 %122, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_reference_count(%Array* %58, i32 1) + %128 = sub i64 %59, 1 + br label %header__6 + +header__6: ; preds = %exiting__6, %exit__5 + %129 = phi i64 [ 0, %exit__5 ], [ %134, %exiting__6 ] + %130 = icmp sle i64 %129, %128 + br i1 %130, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %131 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %58, i64 %129) + %132 = bitcast i8* %131 to %Callable** + %133 = load %Callable*, %Callable** %132, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %133, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %133, i32 1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %134 = add i64 %129, 1 + br label %header__6 + +exit__6: ; preds = %header__6 + call void @__quantum__rt__array_update_alias_count(%Array* %58, i32 1) + %135 = call i64 @__quantum__rt__array_get_size_1d(%Array* %44) + %136 = sub i64 %135, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %137 = phi i64 [ 0, %exit__6 ], [ %142, %exiting__7 ] + %138 = icmp sle i64 %137, %136 + br i1 %138, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %139 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 %137) + %140 = bitcast i8* %139 to %Callable** + %141 = load %Callable*, %Callable** %140, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %141, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %141, i32 -1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %142 = add i64 %137, 1 + br label %header__7 + +exit__7: ; preds 
= %header__7 + call void @__quantum__rt__array_update_alias_count(%Array* %44, i32 -1) + %143 = sub i64 %135, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %144 = phi i64 [ 0, %exit__7 ], [ %149, %exiting__8 ] + %145 = icmp sle i64 %144, %143 + br i1 %145, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %146 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %44, i64 %144) + %147 = bitcast i8* %146 to %Callable** + %148 = load %Callable*, %Callable** %147, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %148, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %148, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %149 = add i64 %144, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_reference_count(%Array* %44, i32 -1) + store %Array* %58, %Array** %plan, align 8 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %150 = phi i64 [ 0, %exit__8 ], [ %155, %exiting__9 ] + %151 = icmp sle i64 %150, 0 + br i1 %151, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %152 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %55, i64 %150) + %153 = bitcast i8* %152 to %Callable** + %154 = load %Callable*, %Callable** %153, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %154, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %154, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %155 = add i64 %150, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_reference_count(%Array* %55, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %58, i32 -1) + br label %continue__2 + +header__10: ; preds = %exiting__10, %then0__4 + %156 = phi i64 [ 0, %then0__4 ], [ %161, %exiting__10 ] + %157 = icmp sle i64 %156, %83 + br i1 %157, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %158 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %81, i64 %156) + %159 = bitcast i8* %158 to %Callable** + %160 = load %Callable*, %Callable** %159, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %160, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %160, i32 1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %161 = add i64 %156, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 1) + %162 = sub i64 %82, 1 + br label %header__11 + +header__11: ; preds = %exiting__11, %exit__10 + %163 = phi i64 [ 0, %exit__10 ], [ %168, %exiting__11 ] + %164 = icmp sle i64 %163, %162 + br i1 %164, label %body__11, label %exit__11 + +body__11: ; preds = %header__11 + %165 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %81, i64 %163) + %166 = bitcast i8* %165 to %Callable** + %167 = load %Callable*, %Callable** %166, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %167, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %167, i32 1) + br label %exiting__11 + +exiting__11: ; preds = %body__11 + %168 = add i64 %163, 1 + br label %header__11 + +exit__11: ; preds = %header__11 + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 1) + %169 = call i64 @__quantum__rt__array_get_size_1d(%Array* %69) + %170 = sub i64 %169, 1 + br label 
%header__12 + +header__12: ; preds = %exiting__12, %exit__11 + %171 = phi i64 [ 0, %exit__11 ], [ %176, %exiting__12 ] + %172 = icmp sle i64 %171, %170 + br i1 %172, label %body__12, label %exit__12 + +body__12: ; preds = %header__12 + %173 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 %171) + %174 = bitcast i8* %173 to %Callable** + %175 = load %Callable*, %Callable** %174, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %175, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %175, i32 -1) + br label %exiting__12 + +exiting__12: ; preds = %body__12 + %176 = add i64 %171, 1 + br label %header__12 + +exit__12: ; preds = %header__12 + call void @__quantum__rt__array_update_alias_count(%Array* %69, i32 -1) + %177 = sub i64 %169, 1 + br label %header__13 + +header__13: ; preds = %exiting__13, %exit__12 + %178 = phi i64 [ 0, %exit__12 ], [ %183, %exiting__13 ] + %179 = icmp sle i64 %178, %177 + br i1 %179, label %body__13, label %exit__13 + +body__13: ; preds = %header__13 + %180 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %69, i64 %178) + %181 = bitcast i8* %180 to %Callable** + %182 = load %Callable*, %Callable** %181, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %182, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %182, i32 -1) + br label %exiting__13 + +exiting__13: ; preds = %body__13 + %183 = add i64 %178, 1 + br label %header__13 + +exit__13: ; preds = %header__13 + call void @__quantum__rt__array_update_reference_count(%Array* %69, i32 -1) + store %Array* %81, %Array** %plan, align 8 + br label %header__14 + +header__14: ; preds = %exiting__14, %exit__13 + %184 = phi i64 [ 0, %exit__13 ], [ %189, %exiting__14 ] + %185 = icmp sle i64 %184, 0 + br i1 %185, label %body__14, label %exit__14 + +body__14: ; preds = %header__14 + %186 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %78, i64 %184) + %187 = bitcast i8* %186 to %Callable** + %188 = load %Callable*, %Callable** %187, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %188, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %188, i32 -1) + br label %exiting__14 + +exiting__14: ; preds = %body__14 + %189 = add i64 %184, 1 + br label %header__14 + +exit__14: ; preds = %header__14 + call void @__quantum__rt__array_update_reference_count(%Array* %78, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 -1) + br label %continue__4 + +header__15: ; preds = %exiting__15, %then1__1 + %190 = phi i64 [ 0, %then1__1 ], [ %195, %exiting__15 ] + %191 = icmp sle i64 %190, %107 + br i1 %191, label %body__15, label %exit__15 + +body__15: ; preds = %header__15 + %192 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 %190) + %193 = bitcast i8* %192 to %Callable** + %194 = load %Callable*, %Callable** %193, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %194, i32 1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %194, i32 1) + br label %exiting__15 + +exiting__15: ; preds = %body__15 + %195 = add i64 %190, 1 + br label %header__15 + +exit__15: ; preds = %header__15 + call void @__quantum__rt__array_update_reference_count(%Array* %105, i32 1) + %196 = sub i64 %106, 1 + br label %header__16 + +header__16: ; preds = %exiting__16, %exit__15 + %197 = phi i64 [ 0, %exit__15 ], [ %202, %exiting__16 ] + %198 = icmp sle i64 %197, %196 + br 
i1 %198, label %body__16, label %exit__16 + +body__16: ; preds = %header__16 + %199 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %105, i64 %197) + %200 = bitcast i8* %199 to %Callable** + %201 = load %Callable*, %Callable** %200, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %201, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %201, i32 1) + br label %exiting__16 + +exiting__16: ; preds = %body__16 + %202 = add i64 %197, 1 + br label %header__16 + +exit__16: ; preds = %header__16 + call void @__quantum__rt__array_update_alias_count(%Array* %105, i32 1) + %203 = call i64 @__quantum__rt__array_get_size_1d(%Array* %99) + %204 = sub i64 %203, 1 + br label %header__17 + +header__17: ; preds = %exiting__17, %exit__16 + %205 = phi i64 [ 0, %exit__16 ], [ %210, %exiting__17 ] + %206 = icmp sle i64 %205, %204 + br i1 %206, label %body__17, label %exit__17 + +body__17: ; preds = %header__17 + %207 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %99, i64 %205) + %208 = bitcast i8* %207 to %Callable** + %209 = load %Callable*, %Callable** %208, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %209, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %209, i32 -1) + br label %exiting__17 + +exiting__17: ; preds = %body__17 + %210 = add i64 %205, 1 + br label %header__17 + +exit__17: ; preds = %header__17 + call void @__quantum__rt__array_update_alias_count(%Array* %99, i32 -1) + %211 = sub i64 %203, 1 + br label %header__18 + +header__18: ; preds = %exiting__18, %exit__17 + %212 = phi i64 [ 0, %exit__17 ], [ %217, %exiting__18 ] + %213 = icmp sle i64 %212, %211 + br i1 %213, label %body__18, label %exit__18 + +body__18: ; preds = %header__18 + %214 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %99, i64 %212) + %215 = bitcast i8* %214 to %Callable** + %216 = load %Callable*, %Callable** %215, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %216, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %216, i32 -1) + br label %exiting__18 + +exiting__18: ; preds = %body__18 + %217 = add i64 %212, 1 + br label %header__18 + +exit__18: ; preds = %header__18 + call void @__quantum__rt__array_update_reference_count(%Array* %99, i32 -1) + store %Array* %105, %Array** %plan, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %100, i32 -1) + %218 = call i64 @__quantum__rt__array_get_size_1d(%Array* %104) + %219 = sub i64 %218, 1 + br label %header__19 + +header__19: ; preds = %exiting__19, %exit__18 + %220 = phi i64 [ 0, %exit__18 ], [ %225, %exiting__19 ] + %221 = icmp sle i64 %220, %219 + br i1 %221, label %body__19, label %exit__19 + +body__19: ; preds = %header__19 + %222 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %104, i64 %220) + %223 = bitcast i8* %222 to %Callable** + %224 = load %Callable*, %Callable** %223, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %224, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %224, i32 -1) + br label %exiting__19 + +exiting__19: ; preds = %body__19 + %225 = add i64 %220, 1 + br label %header__19 + +exit__19: ; preds = %header__19 + call void @__quantum__rt__array_update_reference_count(%Array* %104, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %105, i32 -1) + br label %continue__3 + +header__20: ; preds = %exiting__20, %continue__3 + %226 = phi i64 [ 
0, %continue__3 ], [ %232, %exiting__20 ] + %227 = icmp sle i64 %226, %109 + br i1 %227, label %body__20, label %exit__20 + +body__20: ; preds = %header__20 + %228 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %226) + %229 = bitcast i8* %228 to { double, double }** + %230 = load { double, double }*, { double, double }** %229, align 8 + %231 = bitcast { double, double }* %230 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %231, i32 -1) + br label %exiting__20 + +exiting__20: ; preds = %body__20 + %232 = add i64 %226, 1 + br label %header__20 + +exit__20: ; preds = %header__20 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + %233 = call i64 @__quantum__rt__array_get_size_1d(%Array* %108) + %234 = sub i64 %233, 1 + br label %header__21 + +header__21: ; preds = %exiting__21, %exit__20 + %235 = phi i64 [ 0, %exit__20 ], [ %240, %exiting__21 ] + %236 = icmp sle i64 %235, %234 + br i1 %236, label %body__21, label %exit__21 + +body__21: ; preds = %header__21 + %237 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %108, i64 %235) + %238 = bitcast i8* %237 to %Callable** + %239 = load %Callable*, %Callable** %238, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %239, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %239, i32 -1) + br label %exiting__21 + +exiting__21: ; preds = %body__21 + %240 = add i64 %235, 1 + br label %header__21 + +exit__21: ; preds = %header__21 + call void @__quantum__rt__array_update_alias_count(%Array* %108, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingY, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %disentanglingZ, i32 -1) + %241 = sub i64 %17, 1 + br label %header__22 + +header__22: ; preds = %exiting__22, %exit__21 + %242 = phi i64 [ 0, %exit__21 ], [ %248, %exiting__22 ] + %243 = icmp sle i64 %242, %241 + br i1 %243, label %body__22, label %exit__22 + +body__22: ; preds = %header__22 + %244 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 %242) + %245 = bitcast i8* %244 to { double, double }** + %246 = load { double, double }*, { double, double }** %245, align 8 + %247 = bitcast { double, double }* %246 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %247, i32 -1) + br label %exiting__22 + +exiting__22: ; preds = %body__22 + %248 = add i64 %242, 1 + br label %header__22 + +exit__22: ; preds = %header__22 + call void @__quantum__rt__array_update_alias_count(%Array* %newCoefficients, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingY, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %disentanglingZ, i32 -1) + %249 = sub i64 %17, 1 + br label %header__23 + +header__23: ; preds = %exiting__23, %exit__22 + %250 = phi i64 [ 0, %exit__22 ], [ %256, %exiting__23 ] + %251 = icmp sle i64 %250, %249 + br i1 %251, label %body__23, label %exit__23 + +body__23: ; preds = %header__23 + %252 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %newCoefficients, i64 %250) + %253 = bitcast i8* %252 to { double, double }** + %254 = load { double, double }*, { double, double }** %253, align 8 + %255 = bitcast { double, double }* %254 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %255, i32 -1) + br label %exiting__23 + +exiting__23: ; preds = %body__23 + %256 = add i64 %250, 1 + br label %header__23 + +exit__23: ; preds 
= %header__23 + call void @__quantum__rt__array_update_reference_count(%Array* %newCoefficients, i32 -1) + %257 = bitcast { %Array*, %Array*, %Array* }* %13 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %257, i32 -1) + ret %Array* %108 +} + +define internal { %Array*, %Array*, %Array* }* @Microsoft__Quantum__Preparation____QsRef1__StatePreparationSBMComputeCoefficients____body(%Array* %coefficients) { +entry: + %newCoefficients = alloca %Array*, align 8 + %disentanglingY = alloca %Array*, align 8 + %disentanglingZ = alloca %Array*, align 8 + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %coefficients) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %2) + %5 = bitcast i8* %4 to { double, double }** + %6 = load { double, double }*, { double, double }** %5, align 8 + %7 = bitcast { double, double }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 1) + %9 = sdiv i64 %0, 2 + %10 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %9) + %11 = sub i64 %9, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %16, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %12) + %15 = bitcast i8* %14 to double* + store double 0.000000e+00, double* %15, align 8 + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %16 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + store %Array* %10, %Array** %disentanglingZ, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + %17 = sdiv i64 %0, 2 + %18 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %17) + %19 = sub i64 %17, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %20 = phi i64 [ 0, %exit__2 ], [ %24, %exiting__3 ] + %21 = icmp sle i64 %20, %19 + br i1 %21, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %18, i64 %20) + %23 = bitcast i8* %22 to double* + store double 0.000000e+00, double* %23, align 8 + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %24 = add i64 %20, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + store %Array* %18, %Array** %disentanglingY, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %18, i32 1) + %25 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double 0.000000e+00, double 0.000000e+00) + %26 = sdiv i64 %0, 2 + %27 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %26) + %28 = sub i64 %26, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %29 = phi i64 [ 0, %exit__3 ], [ %34, %exiting__4 ] + %30 = icmp sle i64 %29, %28 + br i1 %30, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %27, i64 %29) + %32 = bitcast i8* %31 to { double, 
double }** + store { double, double }* %25, { double, double }** %32, align 8 + %33 = bitcast { double, double }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %33, i32 1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %34 = add i64 %29, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + store %Array* %27, %Array** %newCoefficients, align 8 + %35 = sub i64 %26, 1 + br label %header__5 + +header__5: ; preds = %exiting__5, %exit__4 + %36 = phi i64 [ 0, %exit__4 ], [ %42, %exiting__5 ] + %37 = icmp sle i64 %36, %35 + br i1 %37, label %body__5, label %exit__5 + +body__5: ; preds = %header__5 + %38 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %27, i64 %36) + %39 = bitcast i8* %38 to { double, double }** + %40 = load { double, double }*, { double, double }** %39, align 8 + %41 = bitcast { double, double }* %40 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %41, i32 1) + br label %exiting__5 + +exiting__5: ; preds = %body__5 + %42 = add i64 %36, 1 + br label %header__5 + +exit__5: ; preds = %header__5 + call void @__quantum__rt__array_update_alias_count(%Array* %27, i32 1) + %43 = sub i64 %0, 1 + br label %preheader__1 + +preheader__1: ; preds = %exit__5 + br label %header__6 + +header__6: ; preds = %exiting__6, %preheader__1 + %idxCoeff = phi i64 [ 0, %preheader__1 ], [ %80, %exiting__6 ] + %44 = icmp sle i64 %idxCoeff, %43 + %45 = icmp sge i64 %idxCoeff, %43 + %46 = select i1 true, i1 %44, i1 %45 + br i1 %46, label %body__6, label %exit__6 + +body__6: ; preds = %header__6 + %47 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %idxCoeff) + %48 = bitcast i8* %47 to { double, double }** + %49 = load { double, double }*, { double, double }** %48, align 8 + %50 = add i64 %idxCoeff, 1 + %51 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %50) + %52 = bitcast i8* %51 to { double, double }** + %53 = load { double, double }*, { double, double }** %52, align 8 + %54 = call { { double, double }*, double, double }* @Microsoft__Quantum__Preparation__BlochSphereCoordinates__body({ double, double }* %49, { double, double }* %53) + %55 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %54, i32 0, i32 0 + %rt = load { double, double }*, { double, double }** %55, align 8 + %56 = bitcast { double, double }* %rt to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 1) + %57 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %54, i32 0, i32 1 + %phi = load double, double* %57, align 8 + %58 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %54, i32 0, i32 2 + %theta = load double, double* %58, align 8 + %59 = load %Array*, %Array** %disentanglingZ, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %59, i32 -1) + %60 = call %Array* @__quantum__rt__array_copy(%Array* %59, i1 false) + %61 = fmul double 5.000000e-01, %phi + %62 = sdiv i64 %idxCoeff, 2 + %63 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %60, i64 %62) + %64 = bitcast i8* %63 to double* + store double %61, double* %64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %60, i32 1) + store %Array* %60, %Array** %disentanglingZ, align 8 + %65 = load %Array*, %Array** %disentanglingY, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %65, i32 
-1) + %66 = call %Array* @__quantum__rt__array_copy(%Array* %65, i1 false) + %67 = fmul double 5.000000e-01, %theta + %68 = sdiv i64 %idxCoeff, 2 + %69 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %66, i64 %68) + %70 = bitcast i8* %69 to double* + %71 = load double, double* %70, align 8 + store double %67, double* %70, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %66, i32 1) + store %Array* %66, %Array** %disentanglingY, align 8 + %72 = load %Array*, %Array** %newCoefficients, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %72, i32 -1) + %73 = call %Array* @__quantum__rt__array_copy(%Array* %72, i1 false) + %74 = sdiv i64 %idxCoeff, 2 + %75 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %73, i64 %74) + %76 = bitcast i8* %75 to { double, double }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 1) + %77 = load { double, double }*, { double, double }** %76, align 8 + %78 = bitcast { double, double }* %77 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %78, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %78, i32 -1) + store { double, double }* %rt, { double, double }** %76, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %73, i32 1) + store %Array* %73, %Array** %newCoefficients, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %56, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %56, i32 -1) + %79 = bitcast { { double, double }*, double, double }* %54 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %79, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %59, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %65, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %72, i32 -1) + br label %exiting__6 + +exiting__6: ; preds = %body__6 + %80 = add i64 %idxCoeff, 2 + br label %header__6 + +exit__6: ; preds = %header__6 + %81 = load %Array*, %Array** %disentanglingY, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 1) + %82 = load %Array*, %Array** %disentanglingZ, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %82, i32 1) + %83 = load %Array*, %Array** %newCoefficients, align 8 + %84 = call i64 @__quantum__rt__array_get_size_1d(%Array* %83) + %85 = sub i64 %84, 1 + br label %header__7 + +header__7: ; preds = %exiting__7, %exit__6 + %86 = phi i64 [ 0, %exit__6 ], [ %92, %exiting__7 ] + %87 = icmp sle i64 %86, %85 + br i1 %87, label %body__7, label %exit__7 + +body__7: ; preds = %header__7 + %88 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 %86) + %89 = bitcast i8* %88 to { double, double }** + %90 = load { double, double }*, { double, double }** %89, align 8 + %91 = bitcast { double, double }* %90 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %91, i32 1) + br label %exiting__7 + +exiting__7: ; preds = %body__7 + %92 = add i64 %86, 1 + br label %header__7 + +exit__7: ; preds = %header__7 + call void @__quantum__rt__array_update_reference_count(%Array* %83, i32 1) + %93 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Array* }* getelementptr ({ %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* null, i32 1) to i64)) + %94 = bitcast %Tuple* %93 to { %Array*, %Array*, %Array* 
}* + %95 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %94, i32 0, i32 0 + %96 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %94, i32 0, i32 1 + %97 = getelementptr inbounds { %Array*, %Array*, %Array* }, { %Array*, %Array*, %Array* }* %94, i32 0, i32 2 + store %Array* %81, %Array** %95, align 8 + store %Array* %82, %Array** %96, align 8 + store %Array* %83, %Array** %97, align 8 + %98 = sub i64 %0, 1 + br label %header__8 + +header__8: ; preds = %exiting__8, %exit__7 + %99 = phi i64 [ 0, %exit__7 ], [ %105, %exiting__8 ] + %100 = icmp sle i64 %99, %98 + br i1 %100, label %body__8, label %exit__8 + +body__8: ; preds = %header__8 + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coefficients, i64 %99) + %102 = bitcast i8* %101 to { double, double }** + %103 = load { double, double }*, { double, double }** %102, align 8 + %104 = bitcast { double, double }* %103 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %104, i32 -1) + br label %exiting__8 + +exiting__8: ; preds = %body__8 + %105 = add i64 %99, 1 + br label %header__8 + +exit__8: ; preds = %header__8 + call void @__quantum__rt__array_update_alias_count(%Array* %coefficients, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %82, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %81, i32 -1) + %106 = sub i64 %84, 1 + br label %header__9 + +header__9: ; preds = %exiting__9, %exit__8 + %107 = phi i64 [ 0, %exit__8 ], [ %113, %exiting__9 ] + %108 = icmp sle i64 %107, %106 + br i1 %108, label %body__9, label %exit__9 + +body__9: ; preds = %header__9 + %109 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 %107) + %110 = bitcast i8* %109 to { double, double }** + %111 = load { double, double }*, { double, double }** %110, align 8 + %112 = bitcast { double, double }* %111 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %112, i32 -1) + br label %exiting__9 + +exiting__9: ; preds = %body__9 + %113 = add i64 %107, 1 + br label %header__9 + +exit__9: ; preds = %header__9 + call void @__quantum__rt__array_update_alias_count(%Array* %83, i32 -1) + %114 = bitcast { double, double }* %25 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %114, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %82, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %81, i32 -1) + %115 = sub i64 %84, 1 + br label %header__10 + +header__10: ; preds = %exiting__10, %exit__9 + %116 = phi i64 [ 0, %exit__9 ], [ %122, %exiting__10 ] + %117 = icmp sle i64 %116, %115 + br i1 %117, label %body__10, label %exit__10 + +body__10: ; preds = %header__10 + %118 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %83, i64 %116) + %119 = bitcast i8* %118 to { double, double }** + %120 = load { double, double }*, { double, double }** %119, align 8 + %121 = bitcast { double, double }* %120 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %121, i32 -1) + br label %exiting__10 + +exiting__10: ; preds = %body__10 + %122 = add i64 %116, 1 + br label %header__10 + +exit__10: ; preds = %header__10 + call void @__quantum__rt__array_update_reference_count(%Array* %83, i32 -1) + ret { %Array*, %Array*, %Array* }* %94 +} + +define internal void @Lifted__PartialApplication__32__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* 
%capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %8 = load %Range, %Range* %7, align 4 + %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5 + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Range, i64 }* + %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1 + store %Range %8, %Range* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = bitcast %Tuple* %arg-tuple to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4 + store double %2, double* %20, align 8 + store %Array* %4, %Array** %21, align 8 + store i2 %6, i2* %22, align 1 + store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8 + store %Array* %17, %Array** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__32__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast 
%Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %8 = load %Range, %Range* %7, align 4 + %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5 + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Range, i64 }* + %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1 + store %Range %8, %Range* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = bitcast %Tuple* %arg-tuple to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4 + store double %2, double* %20, align 8 + store %Array* %4, %Array** %21, align 8 + store i2 %6, i2* %22, align 1 + store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8 + store %Array* %17, %Array** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void @__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__32__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4 + %13 = load %Range, %Range* %12, align 4 + %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5 + %15 = load i64, i64* %14, align 4 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Range, i64 }* + %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1 + store %Range %13, %Range* %18, align 4 + store i64 %15, i64* %19, align 4 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3 + %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4 + store double %7, double* %22, align 8 + store %Array* %9, %Array** %23, align 8 + store i2 %11, i2* %24, align 1 + store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8 + store %Array* %4, %Array** %26, align 8 + %27 = call %Tuple* 
@__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1 + store %Array* %3, %Array** %29, align 8 + store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0 + %32 = load %Callable*, %Callable** %31, align 8 + %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %33) + call void @__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__32__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4 + %13 = load %Range, %Range* %12, align 4 + %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5 + %15 = load i64, i64* %14, align 4 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Range, i64 }* + %18 
= getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1 + store %Range %13, %Range* %18, align 4 + store i64 %15, i64* %19, align 4 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3 + %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4 + store double %7, double* %22, align 8 + store %Array* %9, %Array** %23, align 8 + store i2 %11, i2* %24, align 1 + store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8 + store %Array* %4, %Array** %26, align 8 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1 + store %Array* %3, %Array** %29, align 8 + store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0 + %32 = load %Callable*, %Callable** %31, align 8 + %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %33) + call void @__quantum__rt__callable_make_controlled(%Callable* %33) + call void @__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + ret void +} + +define internal void 
@Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load %Array*, %Array** %2, align 8 + %8 = load i2, i2* %3, align 1 + %9 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8 + %10 = load %Array*, %Array** %5, align 8 + call void @Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____body(double %6, %Array* %7, i2 %8, { %Range, i64 }* %9, %Array* %10) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %1 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 2 + %4 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 3 + %5 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %0, i32 0, i32 4 + %6 = load double, double* %1, align 8 + %7 = load %Array*, %Array** %2, align 8 + %8 = load i2, i2* %3, align 1 + %9 = load { %Range, i64 }*, { %Range, i64 }** %4, align 8 + %10 = load %Array*, %Array** %5, align 8 + call void @Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____adj(double %6, %Array* %7, i2 %8, { %Range, i64 }* %9, %Array* %10) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, i2, { %Range, i64 }*, %Array* }*, { double, %Array*, i2, { 
%Range, i64 }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____ctl(%Array* %3, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Array*, i2, { %Range, i64 }*, %Array* }*, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef1__ApplyMultiplexStep____ctladj(%Array* %3, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__20__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %6 = load %Range, %Range* %5, align 4 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__20__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %4, i32 %count-change) + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %6 = load %Range, %Range* %5, align 4 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void 
@Lifted__PartialApplication__33__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %8 = load %Range, %Range* %7, align 4 + %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5 + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Range, i64 }* + %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1 + store %Range %8, %Range* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = bitcast %Tuple* %arg-tuple to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4 + store double %2, double* %20, align 8 + store %Array* %4, %Array** %21, align 8 + store i2 %6, i2* %22, align 1 + store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8 + store %Array* %17, %Array** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %26, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + ret void +} + +define 
internal void @Lifted__PartialApplication__33__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %1 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 2 + %4 = load %Array*, %Array** %3, align 8 + %5 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 3 + %6 = load i2, i2* %5, align 1 + %7 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 4 + %8 = load %Range, %Range* %7, align 4 + %9 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 5 + %10 = load i64, i64* %9, align 4 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Range, i64 }* + %13 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %12, i32 0, i32 1 + store %Range %8, %Range* %13, align 4 + store i64 %10, i64* %14, align 4 + %15 = bitcast %Tuple* %arg-tuple to { %Array* }* + %16 = getelementptr inbounds { %Array* }, { %Array* }* %15, i32 0, i32 0 + %17 = load %Array*, %Array** %16, align 8 + %18 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %19 = bitcast %Tuple* %18 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %20 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 0 + %21 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 1 + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 2 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 3 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %19, i32 0, i32 4 + store double %2, double* %20, align 8 + store %Array* %4, %Array** %21, align 8 + store i2 %6, i2* %22, align 1 + store { %Range, i64 }* %12, { %Range, i64 }** %23, align 8 + store %Array* %17, %Array** %24, align 8 + %25 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %0, i32 0, i32 0 + %26 = load %Callable*, %Callable** %25, align 8 + %27 = call %Callable* @__quantum__rt__callable_copy(%Callable* %26, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %27) + call void 
@__quantum__rt__callable_invoke(%Callable* %27, %Tuple* %18, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %18, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %27, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %27, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__33__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4 + %13 = load %Range, %Range* %12, align 4 + %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5 + %15 = load i64, i64* %14, align 4 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Range, i64 }* + %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1 + store %Range %13, %Range* %18, align 4 + store i64 %15, i64* %19, align 4 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3 + %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4 + store double %7, double* %22, align 8 + store %Array* %9, %Array** %23, align 8 + store 
i2 %11, i2* %24, align 1 + store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8 + store %Array* %4, %Array** %26, align 8 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1 + store %Array* %3, %Array** %29, align 8 + store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0 + %32 = load %Callable*, %Callable** %31, align 8 + %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %33) + call void @__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__33__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, %Array*, i2, %Range, i64 }* + %6 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 2 + %9 = load %Array*, %Array** %8, align 8 + %10 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 3 + %11 = load i2, i2* %10, align 1 + %12 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 4 + %13 = load %Range, %Range* %12, align 4 + %14 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 5 + %15 = load i64, i64* %14, align 4 + %16 = call %Tuple* @__quantum__rt__tuple_create(i64 
ptrtoint ({ %Range, i64 }* getelementptr ({ %Range, i64 }, { %Range, i64 }* null, i32 1) to i64)) + %17 = bitcast %Tuple* %16 to { %Range, i64 }* + %18 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 0 + %19 = getelementptr inbounds { %Range, i64 }, { %Range, i64 }* %17, i32 0, i32 1 + store %Range %13, %Range* %18, align 4 + store i64 %15, i64* %19, align 4 + %20 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Array*, i2, { %Range, i64 }*, %Array* }* getelementptr ({ double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* null, i32 1) to i64)) + %21 = bitcast %Tuple* %20 to { double, %Array*, i2, { %Range, i64 }*, %Array* }* + %22 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 0 + %23 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 1 + %24 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 2 + %25 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 3 + %26 = getelementptr inbounds { double, %Array*, i2, { %Range, i64 }*, %Array* }, { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, i32 0, i32 4 + store double %7, double* %22, align 8 + store %Array* %9, %Array** %23, align 8 + store i2 %11, i2* %24, align 1 + store { %Range, i64 }* %17, { %Range, i64 }** %25, align 8 + store %Array* %4, %Array** %26, align 8 + %27 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* getelementptr ({ %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* null, i32 1) to i64)) + %28 = bitcast %Tuple* %27 to { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* + %29 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 0 + %30 = getelementptr inbounds { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }, { %Array*, { double, %Array*, i2, { %Range, i64 }*, %Array* }* }* %28, i32 0, i32 1 + store %Array* %3, %Array** %29, align 8 + store { double, %Array*, i2, { %Range, i64 }*, %Array* }* %21, { double, %Array*, i2, { %Range, i64 }*, %Array* }** %30, align 8 + %31 = getelementptr inbounds { %Callable*, double, %Array*, i2, %Range, i64 }, { %Callable*, double, %Array*, i2, %Range, i64 }* %5, i32 0, i32 0 + %32 = load %Callable*, %Callable** %31, align 8 + %33 = call %Callable* @__quantum__rt__callable_copy(%Callable* %32, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %33) + call void @__quantum__rt__callable_make_controlled(%Callable* %33) + call void @__quantum__rt__callable_invoke(%Callable* %33, %Tuple* %27, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %27, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %33, i32 -1) + 
call void @__quantum__rt__callable_update_reference_count(%Callable* %33, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__34__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, i64, %Array* }* + %10 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 2 + store double %2, double* %10, align 8 + store i64 %4, i64* %11, align 4 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %14, %Tuple* %8, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__34__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 2 + %4 = load i64, i64* %3, align 4 + %5 = bitcast %Tuple* %arg-tuple to { %Array* }* + %6 = getelementptr inbounds { %Array* }, { %Array* }* %5, i32 0, i32 0 + %7 = load %Array*, %Array** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, i64, %Array* }* + %10 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 1 + %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %9, i32 0, i32 2 + store double %2, double* %10, align 8 + store i64 %4, i64* %11, align 4 + store %Array* %7, %Array** %12, align 8 + %13 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %14 = load %Callable*, %Callable** %13, align 8 + %15 = call %Callable* @__quantum__rt__callable_copy(%Callable* %14, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %15) + call void @__quantum__rt__callable_invoke(%Callable* %15, %Tuple* %8, %Tuple* %result-tuple) + call 
void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %15, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %15, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__34__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %6 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, i64, %Array* }* + %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 2 + store double %7, double* %12, align 8 + store i64 %9, i64* %13, align 4 + store %Array* %4, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, i64, %Array* }* }* getelementptr ({ %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { double, i64, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { double, i64, %Array* }* %11, { double, i64, %Array* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__34__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Array* }* + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { 
%Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Array*, %Array** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %6 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 2 + %9 = load i64, i64* %8, align 4 + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i64, %Array* }* getelementptr ({ double, i64, %Array* }, { double, i64, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { double, i64, %Array* }* + %12 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %11, i32 0, i32 2 + store double %7, double* %12, align 8 + store i64 %9, i64* %13, align 4 + store %Array* %4, %Array** %14, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, i64, %Array* }* }* getelementptr ({ %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, { double, i64, %Array* }* }* + %17 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %16, i32 0, i32 1 + store %Array* %3, %Array** %17, align 8 + store { double, i64, %Array* }* %11, { double, i64, %Array* }** %18, align 8 + %19 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %5, i32 0, i32 0 + %20 = load %Callable*, %Callable** %19, align 8 + %21 = call %Callable* @__quantum__rt__callable_copy(%Callable* %20, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %21) + call void @__quantum__rt__callable_make_controlled(%Callable* %21) + call void @__quantum__rt__callable_invoke(%Callable* %21, %Tuple* %15, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %21, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %21, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyGlobalRotationStep____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64, %Array* }* + %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2 + %4 = load double, double* %1, align 8 + %5 = load i64, i64* %2, align 4 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Preparation____QsRef1__ApplyGlobalRotationStep____body(double %4, i64 %5, %Array* %6) + ret void +} + +define internal void 
@Microsoft__Quantum__Preparation____QsRef1__ApplyGlobalRotationStep____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, i64, %Array* }* + %1 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 1 + %3 = getelementptr inbounds { double, i64, %Array* }, { double, i64, %Array* }* %0, i32 0, i32 2 + %4 = load double, double* %1, align 8 + %5 = load i64, i64* %2, align 4 + %6 = load %Array*, %Array** %3, align 8 + call void @Microsoft__Quantum__Preparation____QsRef1__ApplyGlobalRotationStep____adj(double %4, i64 %5, %Array* %6) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyGlobalRotationStep____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, i64, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, i64, %Array* }*, { double, i64, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef1__ApplyGlobalRotationStep____ctl(%Array* %3, { double, i64, %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyGlobalRotationStep____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, i64, %Array* }* }* + %1 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, i64, %Array* }* }, { %Array*, { double, i64, %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, i64, %Array* }*, { double, i64, %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef1__ApplyGlobalRotationStep____ctladj(%Array* %3, { double, i64, %Array* }* %4) + ret void +} + +define internal void @MemoryManagement__21__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__21__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double, i64 }* + %1 = getelementptr inbounds { %Callable*, double, i64 }, { %Callable*, double, i64 }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* 
%capture-tuple, i32 %count-change) + ret void +} + +define internal { { double, double }*, double, double }* @Microsoft__Quantum__Preparation__BlochSphereCoordinates__body({ double, double }* %a0, { double, double }* %a1) { +entry: + %0 = bitcast { double, double }* %a0 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 1) + %1 = bitcast { double, double }* %a1 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 1) + %abs0 = call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %a0) + %abs1 = call double @Microsoft__Quantum__Math__AbsComplexPolar__body({ double, double }* %a1) + %arg0 = call double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* %a0) + %arg1 = call double @Microsoft__Quantum__Math__ArgComplexPolar__body({ double, double }* %a1) + %2 = fmul double %abs0, %abs0 + %3 = fmul double %abs1, %abs1 + %d = fadd double %2, %3 + %r = call double @__quantum__qis__sqrt__body(double %d) + %4 = fadd double %arg0, %arg1 + %t = fmul double 5.000000e-01, %4 + %phi = fsub double %arg1, %arg0 + %5 = call double @__quantum__qis__arctan2__body(double %abs1, double %abs0) + %theta = fmul double 2.000000e+00, %5 + %6 = call { double, double }* @Microsoft__Quantum__Math__ComplexPolar__body(double %r, double %t) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ { double, double }*, double, double }* getelementptr ({ { double, double }*, double, double }, { { double, double }*, double, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { { double, double }*, double, double }* + %9 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { { double, double }*, double, double }, { { double, double }*, double, double }* %8, i32 0, i32 2 + store { double, double }* %6, { double, double }** %9, align 8 + store double %phi, double* %10, align 8 + store double %theta, double* %11, align 8 + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %0, i32 -1) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %1, i32 -1) + ret { { double, double }*, double, double }* %8 +} + +define internal void @Lifted__PartialApplication__35__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Callable*, { %Array* }* }* + %5 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 1 + store %Callable* %2, %Callable** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %9, 
%Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__35__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %2 = load %Callable*, %Callable** %1, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Callable*, { %Array* }* }* + %5 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %4, i32 0, i32 1 + store %Callable* %2, %Callable** %5, align 8 + %7 = bitcast %Tuple* %arg-tuple to { %Array* }* + store { %Array* }* %7, { %Array* }** %6, align 8 + %8 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %9 = load %Callable*, %Callable** %8, align 8 + %10 = call %Callable* @__quantum__rt__callable_copy(%Callable* %9, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %10) + call void @__quantum__rt__callable_invoke(%Callable* %10, %Tuple* %3, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__35__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, { %Array* }* }* + %10 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store { %Array* }* %4, { %Array* }** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, { %Array* }* }* }* getelementptr ({ %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Callable*, { %Array* }* }* }* + %14 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* 
%13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Callable*, { %Array* }* }* %9, { %Callable*, { %Array* }* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__35__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Array* }* }* + %1 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Array* }* }, { %Array*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %6 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 1 + %7 = load %Callable*, %Callable** %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, { %Array* }* }* getelementptr ({ %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { %Callable*, { %Array* }* }* + %10 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %9, i32 0, i32 1 + store %Callable* %7, %Callable** %10, align 8 + store { %Array* }* %4, { %Array* }** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { %Callable*, { %Array* }* }* }* getelementptr ({ %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { %Callable*, { %Array* }* }* }* + %14 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { %Callable*, { %Array* }* }* %9, { %Callable*, { %Array* }* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* 
%18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyToLittleEndian____body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Callable*, %Callable** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef1__ApplyToLittleEndian____body(%Callable* %3, { %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyToLittleEndian____adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Callable*, { %Array* }* }* + %1 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Callable*, { %Array* }* }, { %Callable*, { %Array* }* }* %0, i32 0, i32 1 + %3 = load %Callable*, %Callable** %1, align 8 + %4 = load { %Array* }*, { %Array* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef1__ApplyToLittleEndian____adj(%Callable* %3, { %Array* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyToLittleEndian____ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, { %Array* }* }* }* + %1 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Callable*, { %Array* }* }*, { %Callable*, { %Array* }* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef1__ApplyToLittleEndian____ctl(%Array* %3, { %Callable*, { %Array* }* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Preparation____QsRef1__ApplyToLittleEndian____ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { %Callable*, { %Array* }* }* }* + %1 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { %Callable*, { %Array* }* }* }, { %Array*, { %Callable*, { %Array* }* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { %Callable*, { %Array* }* }*, { %Callable*, { %Array* }* }** %2, align 8 + call void @Microsoft__Quantum__Preparation____QsRef1__ApplyToLittleEndian____ctladj(%Array* %3, { %Callable*, { %Array* }* }* %4) + ret void +} + +define internal void @MemoryManagement__22__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = 
bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__22__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, %Callable* }* + %1 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + %3 = getelementptr inbounds { %Callable*, %Callable* }, { %Callable*, %Callable* }* %0, i32 0, i32 1 + %4 = load %Callable*, %Callable** %3, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %4, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +declare double @__quantum__qis__drawrandomdouble__body(double, double) + +define internal double @Microsoft__Quantum__Random__DrawRandomDouble__body(double %min, double %max) { +entry: + %0 = call double @__quantum__qis__drawrandomdouble__body(double %min, double %max) + ret double %0 +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %control, %Qubit** %5, align 8 + %__controlQubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %__controlQubits__, %Array* %3) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__1, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* + %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 + store %Qubit* %control, %Qubit** %5, align 8 + store %Qubit* %target, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Exp__body(%Array* %paulis, double %theta, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__body(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Exp__adj(%Array* %paulis, double %theta, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__exp__adj(%Array* %paulis, double %theta, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 0 + %paulis = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ 
%Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Array*, double, %Array* }* + %6 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 2 + store %Array* %paulis, %Array** %6, align 8 + store double %theta, double* %7, align 8 + store %Array* %qubits, %Array** %8, align 8 + call void @__quantum__qis__exp__ctl(%Array* %__controlQubits__, { %Array*, double, %Array* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 0 + %paulis = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 1) + %2 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %0, i32 0, i32 2 + %qubits = load %Array*, %Array** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, double, %Array* }* getelementptr ({ %Array*, double, %Array* }, { %Array*, double, %Array* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { %Array*, double, %Array* }* + %6 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { %Array*, double, %Array* }, { %Array*, double, %Array* }* %5, i32 0, i32 2 + store %Array* %paulis, %Array** %6, align 8 + store double %theta, double* %7, align 8 + store %Array* %qubits, %Array** %8, align 8 + call void @__quantum__qis__exp__ctladj(%Array* %__controlQubits__, { %Array*, double, %Array* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %paulis, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +declare void @__quantum__qis__h__ctl(%Array*, 
%Qubit*) + +define internal %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { +entry: + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 -2, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %3 = bitcast i8* %2 to %Qubit** + store %Qubit* %qubit, %Qubit** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %4 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + ret %Result* %4 +} + +declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) + +define internal void @Microsoft__Quantum__Intrinsic__R__body(i2 %pauli, double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__r__body(i2 %pauli, double %theta, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R__adj(i2 %pauli, double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__r__adj(i2 %pauli, double %theta, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 2 + %qubit = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { i2, double, %Qubit* }* + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 2 + store i2 %pauli, i2* %6, align 1 + store double %theta, double* %7, align 8 + store %Qubit* %qubit, %Qubit** %8, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__R__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, double, 
%Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 2 + %qubit = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { i2, double, %Qubit* }* + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 2 + store i2 %pauli, i2* %6, align 1 + store double %theta, double* %7, align 8 + store %Qubit* %qubit, %Qubit** %8, align 8 + call void @__quantum__qis__r__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +declare %Result* @__quantum__rt__result_get_one() + +declare void @__quantum__qis__s__body(%Qubit*) + +declare void @__quantum__qis__s__adj(%Qubit*) + +declare void @__quantum__qis__s__ctl(%Array*, %Qubit*) + +declare void @__quantum__qis__s__ctladj(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__z__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +attributes #0 = { nofree nosync nounwind readnone speculatable willreturn } diff --git a/src/munchkin/tests/qsharp/qaoa/QAOA.csproj b/src/munchkin/tests/qsharp/qaoa/QAOA.csproj new file mode 100644 index 0000000..2c1de19 --- /dev/null +++ b/src/munchkin/tests/qsharp/qaoa/QAOA.csproj @@ -0,0 +1,16 @@ + + + Exe + net6.0 + true + Detailed + $(NETCoreSdkRuntimeIdentifier) + + + + + Always + + + + diff --git a/src/munchkin/tests/qsharp/qaoa/QAOA.qs b/src/munchkin/tests/qsharp/qaoa/QAOA.qs new file mode 100644 index 0000000..a240bf7 --- /dev/null +++ b/src/munchkin/tests/qsharp/qaoa/QAOA.qs @@ -0,0 +1,268 @@ +namespace Microsoft.Quantum.Samples.QAOA { + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Measurement; + open Microsoft.Quantum.Convert; + open Microsoft.Quantum.Arrays; + open Microsoft.Quantum.Math; + open Microsoft.Quantum.Diagnostics; + + /// # Summary + /// This operation applies the X-rotation to each qubit. 
We can think of it as time + /// evolution induced by applying a Hamiltonian that sums over all X rotations. + /// + /// # Description + /// The driver Hamiltonian is defined as: + /// $H = -\sum_i X_i$, applied for time t. + /// + /// # Input + /// ## time + /// Time passed in the evolution of the X rotation + /// ## target + /// Target qubit register + operation ApplyDriverHamiltonian(time: Double, target: Qubit[]) : Unit is Adj + Ctl { + ApplyToEachCA(Rx(-2.0 * time, _), target); + } + + /// # Summary + /// This operation applies the Z-rotation according to the instance Hamiltonian. + /// We can think of it as Hamiltonian time evolution for time t induced + /// by an Ising Hamiltonian. The Ising Hamiltonian sums over all connected + /// pairs of Pauli-Z operations Z_i and Z_j scaled by a factor J_ij, plus + /// the sum over all Z_i scaled by a factor h_i. + /// + /// # Description + /// The Ising Hamiltonian is defined as: + /// $\sum_{ij} J_{ij} Z_i Z_j + \sum_i h_i Z_i$. + /// + /// # Input + /// ## numSegments + /// Number of segments in the graph that describes possible paths. + /// ## time + /// Time point in evolution. + /// ## weights + /// Ising magnetic field or "weights" encoding the constraints of our + /// traveling Santa problem. + /// ## coupling + /// Ising coupling term or "penalty" encoding the constraints of our + /// traveling Santa problem. + /// ## target + /// Qubit register that encodes the spin values in the Ising Hamiltonian. + operation ApplyInstanceHamiltonian( + numSegments : Int, + time : Double, + weights : Double[], + coupling : Double[], + target : Qubit[] + ) : Unit { + use auxiliary = Qubit(); + for (h, qubit) in Zipped(weights, target) { + Rz(2.0 * time * h, qubit); + } + for i in 0..5 { + for j in i + 1..5 { + within { + CNOT(target[i], auxiliary); + CNOT(target[j], auxiliary); + } apply { + Rz(2.0 * time * coupling[numSegments * i + j], auxiliary); + } + } + } + } + + /// # Summary + /// Calculate Hamiltonian parameters based on the given costs and penalty. + /// + /// # Input + /// ## segmentCosts + /// Cost values of each segment. + /// ## penalty + /// Penalty for cases that don't meet constraints. + /// ## numSegments + /// Number of segments in the graph that describes possible paths. + /// + /// # Output + /// ## weights + /// Hamiltonian parameters or "weights" as an array where each element corresponds + /// to a parameter h_j for qubit state j. + function HamiltonianWeights( + segmentCosts : Double[], + penalty : Double, + numSegments : Int + ) : Double[] { + mutable weights = new Double[numSegments]; + for i in 0..numSegments - 1 { + set weights w/= i <- 4.0 * penalty - 0.5 * segmentCosts[i]; + } + return weights; + }
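+ + // Worked example (a sanity check, not part of the algorithm), using the values set in + // RunQAOATrials below: with penalty = 20.0 and segmentCosts[0] = 4.70, HamiltonianWeights + // yields weights[0] = 4.0 * 20.0 - 0.5 * 4.70 = 77.65.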
+ + /// # Summary + /// Calculate Hamiltonian coupling parameters based on the given penalty. + /// + /// # Input + /// ## penalty + /// Penalty for cases that don't meet constraints. + /// ## numSegments + /// Number of segments in the graph that describes possible paths. + /// + /// # Output + /// ## coupling + /// Hamiltonian coupling parameters as an array, where each element corresponds + /// to a parameter J_ij between qubit states i and j. + function HamiltonianCouplings(penalty : Double, numSegments : Int) : Double[] { + // Calculate Hamiltonian coupling parameters based on the given costs and penalty. + // Most elements of J_ij equal 2*penalty, so set all elements to this value, + // then overwrite the exceptions. This is currently implemented for the + // example with 6 segments. + EqualityFactI(numSegments, 6, + "Currently, HamiltonianCouplings only supports given constraints for 6 segments." + ); + return ConstantArray(numSegments * numSegments, 2.0 * penalty) + w/ 2 <- penalty + w/ 9 <- penalty + w/ 29 <- penalty; + } + + /// # Summary + /// Perform the QAOA algorithm for this Ising Hamiltonian. + /// + /// # Input + /// ## numSegments + /// Number of segments in the graph + /// ## weights + /// Instance Hamiltonian parameters or "weights" as an array where each + /// element corresponds to a parameter h_j for qubit state j. + /// ## couplings + /// Instance Hamiltonian coupling parameters as an array, where each + /// element corresponds to a parameter J_ij between qubit states i and j. + /// ## timeX + /// Time evolution for PauliX operations + /// ## timeZ + /// Time evolution for PauliZ operations + operation PerformQAOA( + numSegments : Int, + weights : Double[], + couplings : Double[], + timeX : Double[], + timeZ : Double[] + ) : Bool[] { + EqualityFactI(Length(timeX), Length(timeZ), "timeZ and timeX are not the same length"); + + // Run the QAOA circuit + mutable result = new Bool[numSegments]; + use x = Qubit[numSegments]; + ApplyToEach(H, x); // prepare the uniform superposition + for (tz, tx) in Zipped(timeZ, timeX) { + ApplyInstanceHamiltonian(numSegments, tz, weights, couplings, x); // do Exp(-i H_C tz) + ApplyDriverHamiltonian(tx, x); // do Exp(-i H_0 tx) + } + return ResultArrayAsBoolArray(MultiM(x)); // measure in the computational basis + } + + /// # Summary + /// Calculate the total cost for the given result. + /// + /// # Input + /// ## segmentCosts + /// Array of costs per segment + /// ## usedSegments + /// Array of which segments are used + /// + /// # Output + /// ## finalCost + /// Calculated cost of given path + function CalculatedCost(segmentCosts : Double[], usedSegments : Bool[]) : Double { + mutable finalCost = 0.0; + for (cost, segment) in Zipped(segmentCosts, usedSegments) { + set finalCost += segment ? cost | 0.0; + } + return finalCost; + } + + /// # Summary + /// Final check to determine if the used segments satisfy our known + /// constraints. This function is implemented to consider a graph with 6 + /// segments and three valid connected paths. + /// + /// # Input + /// ## numSegments + /// Number of segments in the graph + /// ## usedSegments + /// Array of which segments were used + /// + /// # Output + /// ## output + /// Boolean value indicating whether the conditions are satisfied. + function IsSatisfactory(numSegments: Int, usedSegments : Bool[]) : Bool { + EqualityFactI(numSegments, 6, + "Currently, IsSatisfactory only supports constraints for 6 segments." + ); + mutable hammingWeight = 0; + for segment in usedSegments { + set hammingWeight += segment ? 1 | 0; + } + if (hammingWeight != 4 + or usedSegments[0] != usedSegments[2] + or usedSegments[1] != usedSegments[3] + or usedSegments[4] != usedSegments[5]) { + return false; + } + return true; + } + + /// # Summary + /// Run QAOA for a given number of trials on 6 qubits. This sample is based + /// on the Traveling Santa Problem outlined here: + /// http://quantumalgorithmzoo.org/traveling_santa/. + /// Reports the best itinerary for the Traveling Santa Problem and how + /// many of the runs found it. This should typically return + /// the optimal solution roughly 71% of the time. + /// + /// # Remarks + /// The operation takes no input; the number of trials is fixed at 20 inside its body.
+ @EntryPoint() + operation RunQAOATrials() : Unit { + let numTrials = 20; + let penalty = 20.0; + let segmentCosts = [4.70, 9.09, 9.03, 5.70, 8.02, 1.71]; + let timeX = [0.619193, 0.742566, 0.060035, -1.568955, 0.045490]; + let timeZ = [3.182203, -1.139045, 0.221082, 0.537753, -0.417222]; + let limit = 1E-6; + let numSegments = 6; + + mutable bestCost = 100.0 * penalty; + mutable bestItinerary = [false, false, false, false, false, false]; + mutable successNumber = 0; + + let weights = HamiltonianWeights(segmentCosts, penalty, numSegments); + let couplings = HamiltonianCouplings(penalty, numSegments); + + for trial in 1..numTrials { + let result = PerformQAOA( + numSegments, + weights, + couplings, + timeX, + timeZ + ); + let cost = CalculatedCost(segmentCosts, result); + let sat = IsSatisfactory(numSegments, result); + Message($"result = {result}, cost = {cost}, satisfactory = {sat}"); + if (sat) { + if (cost < bestCost - limit) { + // New best cost found - update + set bestCost = cost; + set bestItinerary = result; + set successNumber = 1; + } elif (AbsD(cost - bestCost) < limit) { + set successNumber += 1; + } + } + } + let runPercentage = IntAsDouble(successNumber) * 100.0 / IntAsDouble(numTrials); + Message("Simulation is complete\n"); + Message($"Best itinerary found: {bestItinerary}, cost = {bestCost}"); + Message($"{runPercentage}% of runs found the best itinerary\n"); + } +} diff --git a/src/munchkin/tests/qsharp/qaoa/README.md b/src/munchkin/tests/qsharp/qaoa/README.md new file mode 100644 index 0000000..78d3272 --- /dev/null +++ b/src/munchkin/tests/qsharp/qaoa/README.md @@ -0,0 +1,85 @@ +--- +page_type: sample +languages: +- qsharp +products: +- qdk +description: "Using QAOA to calculate the path in a weighted graph with the smallest cost" +--- + +# Quantum Approximate Optimization Algorithm + +This is an example of a Quantum Approximate Optimization Algorithm (QAOA) implemented in a Q# program. QAOA was first introduced by Farhi et al. in [A Quantum Approximate Optimization Algorithm](https://arxiv.org/abs/1411.4028). + +This sample is based on the "Traveling Santa" problem described by Stephen Jordan in his Quantum Algorithm Zoo post, [Traveling Santa Problem](http://quantumalgorithmzoo.org/traveling_santa/). + +## Prerequisites + +- The Microsoft [Quantum Development Kit](https://docs.microsoft.com/azure/quantum/install-overview-qdk/). + +## Description + +Here we use a combinatorial optimization problem to demonstrate an implementation of QAOA. We consider a "Traveling Santa" problem. Santa needs to visit a few different houses and end up back at the North Pole. We can express this as a graph with 4 nodes, one for the North Pole and one for each house, where each node is connected to all other nodes. The edges in the graph each have a cost associated with them. The goal is to find the optimal route such that Santa visits each house exactly once and ends up back at the North Pole. + +The idea behind QAOA is to express a problem as an Ising Hamiltonian and replace each variable z_j with a Pauli-Z operation acting on the jth qubit, such that + +![𝐻_𝐶 = Σᵢⱼ 𝐽ᵢⱼ𝑍ᵢ𝑍ⱼ + Σᵢ ℎᵢ𝑍ᵢ](hamil1.png)
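+ +For reference, this sample instantiates those coefficients from the segment costs c_i and a penalty p that enforces the path constraints. The following merely restates the arithmetic of the `HamiltonianWeights` and `HamiltonianCouplings` functions in [QAOA.qs](./QAOA.qs), rather than defining anything new: + +$$h_i = 4p - \tfrac{1}{2}c_i, \qquad J_{ij} = 2p,$$ + +where J_ij is lowered to p for the three segment pairs (0, 2), (1, 3) and (4, 5) that must be used together in any valid itinerary (compare the checks in `IsSatisfactory`).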
+ +If we then find the ground state of said Hamiltonian, we can find the solution by measuring the value of each qubit in the Pauli-Z basis. To find said ground state, we intersperse time evolutions under the instance Hamiltonian with time evolutions induced by a "driver" Hamiltonian that evolves the qubits by a Pauli-X rotation of the form: + +![𝐻₀ = −Σᵢ 𝑋ᵢ](hamil2.png) + +In this sample, the time evolutions are pre-determined; however, they could also be found variationally as part of a hybrid classical-quantum algorithm, e.g. by using gradient descent. + +We use QAOA to solve the Traveling Santa problem by expressing its constraints as penalties in the instance Hamiltonian weights and coupling terms. For a more detailed explanation of this sample, please visit Stephen Jordan's Quantum Algorithm Zoo post, [Traveling Santa Problem](http://quantumalgorithmzoo.org/traveling_santa/). + +## Running the Sample + +Browse to the `samples/algorithms/qaoa` folder and run `dotnet build` to build the project. Then run `dotnet run [options] --no-build`. Optionally, omit the `--no-build` option to automatically build the project before execution. + +To see options, run `dotnet run -- --help`. + +```bash +Options: + --num-trials (REQUIRED) The number of trials to run. + -s, --simulator The name of the simulator to use. + --version Show version information + -?, -h, --help Show help and usage information +``` + +## Manifest + +- **qaoa/** + - [QAOA.csproj](./QAOA.csproj): Main Q# project for the example. + - [QAOA.qs](./QAOA.qs): The Q# implementation of the QAOA program. + +## Example run + +```bash +> dotnet run --num-trials 20 +result = [True,False,True,False,True,True], cost = 23.46, satisfactory = True +result = [True,False,True,False,True,True], cost = 23.46, satisfactory = True +result = [True,False,True,False,True,True], cost = 23.46, satisfactory = True +result = [True,False,True,False,True,True], cost = 23.46, satisfactory = True +result = [True,False,True,False,True,True], cost = 23.46, satisfactory = True +result = [True,False,True,False,True,True], cost = 23.46, satisfactory = True +result = [True,False,True,False,True,True], cost = 23.46, satisfactory = True +result = [True,False,True,False,True,True], cost = 23.46, satisfactory = True +result = [True,False,True,False,True,True], cost = 23.46, satisfactory = True +result = [True,False,True,False,True,True], cost = 23.46, satisfactory = True +result = [True,False,True,False,True,True], cost = 23.46, satisfactory = True +result = [True,False,True,False,True,True], cost = 23.46, satisfactory = True +result = [True,False,True,False,True,True], cost = 23.46, satisfactory = True +result = [True,False,True,False,True,True], cost = 23.46, satisfactory = True +result = [True,False,True,False,True,True], cost = 23.46, satisfactory = True +result = [True,False,True,True,True,True], cost = 29.16, satisfactory = False +result = [True,False,True,True,True,True], cost = 29.16, satisfactory = False +result = [True,False,True,False,True,True], cost = 23.46, satisfactory = True +result = [True,False,True,False,False,False], cost = 13.73, satisfactory = False +result = [True,False,True,False,True,True], cost = 23.46, satisfactory = True +result = [True,False,True,False,False,False], cost = 13.73, satisfactory = False +Simulation is complete + +Best itinerary found: [True,False,True,False,True,True], cost = 23.46 +85% of runs found the best itinerary +``` diff --git
a/src/munchkin/tests/qsharp/qaoa/hamil2.png b/src/munchkin/tests/qsharp/qaoa/hamil2.png new file mode 100644 index 0000000..1536e11 Binary files /dev/null and b/src/munchkin/tests/qsharp/qaoa/hamil2.png differ diff --git a/src/munchkin/tests/qsharp/qaoa/libLLVM.dll b/src/munchkin/tests/qsharp/qaoa/libLLVM.dll new file mode 100644 index 0000000..e10836a Binary files /dev/null and b/src/munchkin/tests/qsharp/qaoa/libLLVM.dll differ diff --git a/src/munchkin/tests/qsharp/qaoa/qir/QAOA.ll b/src/munchkin/tests/qsharp/qaoa/qir/QAOA.ll new file mode 100644 index 0000000..72e21d1 --- /dev/null +++ b/src/munchkin/tests/qsharp/qaoa/qir/QAOA.ll @@ -0,0 +1,3063 @@ + +%Tuple = type opaque +%Array = type opaque +%Callable = type opaque +%Range = type { i64, i64, i64 } +%Qubit = type opaque +%String = type opaque +%Result = type opaque + +@PartialApplication__1__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__1__ctladj__wrapper] +@Microsoft__Quantum__Intrinsic__Rx__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rx__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rx__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rx__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__Rx__ctladj__wrapper] +@MemoryManagement__1__FunctionTable = internal constant [2 x void (%Tuple*, i32)*] [void (%Tuple*, i32)* @MemoryManagement__1__RefCount, void (%Tuple*, i32)* @MemoryManagement__1__AliasCount] +@PartialApplication__2__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__2__ctladj__wrapper] +@PartialApplication__3__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__3__ctladj__wrapper] +@PartialApplication__4__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Lifted__PartialApplication__4__ctladj__wrapper] +@0 = internal constant [80 x i8] c"Currently, HamiltonianCouplings only supports given constraints for 6 segments.\00" +@1 = internal constant [68 x i8] c"Currently, IsSatisfactory only supports constraints for 6 segments.\00" +@2 = internal constant [40 x i8] c"timeZ and timeX are not the same length\00" +@Microsoft__Quantum__Intrinsic__H__FunctionTable = internal constant 
[4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__H__ctladj__wrapper] +@3 = internal constant [10 x i8] c"result = \00" +@4 = internal constant [3 x i8] c", \00" +@5 = internal constant [2 x i8] c"[\00" +@6 = internal constant [5 x i8] c"true\00" +@7 = internal constant [6 x i8] c"false\00" +@8 = internal constant [2 x i8] c"]\00" +@9 = internal constant [10 x i8] c", cost = \00" +@10 = internal constant [18 x i8] c", satisfactory = \00" +@11 = internal constant [24 x i8] c"Simulation is complete\0A\00" +@12 = internal constant [23 x i8] c"Best itinerary found: \00" +@13 = internal constant [36 x i8] c"% of runs found the best itinerary\0A\00" +@14 = internal constant [2 x i8] c"\22\00" +@15 = internal constant [13 x i8] c"\0A\09Expected:\09\00" +@16 = internal constant [11 x i8] c"\0A\09Actual:\09\00" +@Microsoft__Quantum__Convert__ResultAsBool__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Convert__ResultAsBool__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@Microsoft__Quantum__Intrinsic__M__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Intrinsic__M__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null, void (%Tuple*, %Tuple*, %Tuple*)* null] +@17 = internal constant [3 x i8] c"()\00" + +define internal void @Microsoft__Quantum__Samples__QAOA__ApplyDriverHamiltonian__body(double %time, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Rx__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = fmul double -2.000000e+00, %time + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %3 = bitcast %Tuple* %2 to { %Callable*, double }* + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 0 + %5 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 1 + store %Callable* %0, %Callable** %4, align 8 + store double %1, double* %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__1__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %2) + call void @Microsoft__Quantum__Canon___8e6f761afcaa48b3b346030d61881540_ApplyToEachCA__body(%Callable* %6, %Array* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + ret void +} + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) + +define internal void @Microsoft__Quantum__Canon___8e6f761afcaa48b3b346030d61881540_ApplyToEachCA__body(%Callable* 
%singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call %Range @Microsoft__Quantum__Arrays___5da6d4c7ac3c4cc8b467e7839782288c_IndexRange__body(%Array* %register) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %4 = icmp sgt i64 %2, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxQubit = phi i64 [ %1, %preheader__1 ], [ %14, %exiting__1 ] + %5 = icmp sle i64 %idxQubit, %3 + %6 = icmp sge i64 %idxQubit, %3 + %7 = select i1 %4, i1 %5, i1 %6 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Qubit* }* + %13 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %12, i32 0, i32 0 + store %Qubit* %10, %Qubit** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %singleElementOperation, %Tuple* %11, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %14 = add i64 %idxQubit, %2 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, 
double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Qubit* }* + %10 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Qubit* }* }* getelementptr ({ %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Qubit* }* %9, { double, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__1__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Qubit* }* + %10 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Qubit* }* }* getelementptr ({ %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Qubit* }* %9, { double, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, 
%Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rx__body(double %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { double, %Qubit* }* + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %3 = load double, double* %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rx__adj(double %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %3, { double, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { double, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { double, %Qubit* }*, { double, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %3, { double, %Qubit* }* %4) + ret void +} + +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) + +declare %Tuple* @__quantum__rt__tuple_create(i64) + +define internal void @MemoryManagement__1__RefCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_reference_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +define internal void @MemoryManagement__1__AliasCount(%Tuple* %capture-tuple, i32 %count-change) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %2 = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__callable_update_alias_count(%Callable* %2, i32 %count-change) + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %capture-tuple, i32 %count-change) + ret void +} + +declare void 
@__quantum__rt__capture_update_reference_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) + +define internal void @Microsoft__Quantum__Intrinsic__Rx__body(double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__r__body(i2 1, double %theta, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__adj(double %theta, %Qubit* %qubit) { +entry: + %theta__1 = fneg double %theta + call void @__quantum__qis__r__body(i2 1, double %theta__1, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i2, double, %Qubit* }* + %5 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 2 + store i2 1, i2* %5, align 1 + store double %theta, double* %6, align 8 + store %Qubit* %qubit, %Qubit** %7, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rx__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %theta__1 = fneg double %theta + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i2, double, %Qubit* }* + %5 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 2 + store i2 1, i2* %5, align 1 + store double %theta__1, double* %6, align 8 + store %Qubit* %qubit, %Qubit** %7, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* 
%__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) + +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) + +declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) + +declare void @__quantum__rt__callable_make_adjoint(%Callable*) + +declare void @__quantum__rt__callable_make_controlled(%Callable*) + +declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) + +define internal void @Microsoft__Quantum__Samples__QAOA__ApplyDriverHamiltonian__adj(double %time, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Rx__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = fmul double -2.000000e+00, %time + %2 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %3 = bitcast %Tuple* %2 to { %Callable*, double }* + %4 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 0 + %5 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %3, i32 0, i32 1 + store %Callable* %0, %Callable** %4, align 8 + store double %1, double* %5, align 8 + %6 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__2__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %2) + call void @Microsoft__Quantum__Canon___8e6f761afcaa48b3b346030d61881540_ApplyToEachCA__adj(%Callable* %6, %Array* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %6, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %6, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8e6f761afcaa48b3b346030d61881540_ApplyToEachCA__adj(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call %Range @Microsoft__Quantum__Arrays___5da6d4c7ac3c4cc8b467e7839782288c_IndexRange__body(%Array* %register) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + %4 = sub i64 %3, %1 + %5 = sdiv i64 %4, %2 + %6 = mul i64 %2, %5 + %7 = add i64 %1, %6 + %8 = sub i64 0, %2 + %9 = insertvalue %Range zeroinitializer, i64 %7, 0 + %10 = insertvalue %Range %9, i64 %8, 1 + %11 = insertvalue %Range %10, i64 %1, 2 + %12 = extractvalue %Range %11, 0 + %13 = extractvalue %Range %11, 1 + %14 = extractvalue %Range %11, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %15 = icmp sgt i64 %13, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %12, %preheader__1 
], [ %26, %exiting__1 ] + %16 = icmp sle i64 %__qsVar0__idxQubit__, %14 + %17 = icmp sge i64 %__qsVar0__idxQubit__, %14 + %18 = select i1 %15, i1 %16, i1 %17 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %19) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %21 = bitcast i8* %20 to %Qubit** + %22 = load %Qubit*, %Qubit** %21, align 8 + %23 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %24 = bitcast %Tuple* %23 to { %Qubit* }* + %25 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %24, i32 0, i32 0 + store %Qubit* %22, %Qubit** %25, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %19, %Tuple* %23, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %19, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %23, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %26 = add i64 %__qsVar0__idxQubit__, %13 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, 
%Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Qubit* }* + %10 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Qubit* }* }* getelementptr ({ %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Qubit* }* %9, { double, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__2__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Qubit* }* + %10 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Qubit* }* }* getelementptr ({ %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Qubit* }* %9, { double, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Samples__QAOA__ApplyDriverHamiltonian__ctl(%Array* %__controlQubits__, { double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %0, i32 0, i32 0 + %time = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, 
%Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Rx__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = fmul double -2.000000e+00, %time + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, double }* + %7 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %6, i32 0, i32 1 + store %Callable* %3, %Callable** %7, align 8 + store double %4, double* %8, align 8 + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__3__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %5) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 + store %Callable* %9, %Callable** %12, align 8 + store %Array* %target, %Array** %13, align 8 + call void @Microsoft__Quantum__Canon___8e6f761afcaa48b3b346030d61881540_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %11) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8e6f761afcaa48b3b346030d61881540_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %3 = call %Range @Microsoft__Quantum__Arrays___5da6d4c7ac3c4cc8b467e7839782288c_IndexRange__body(%Array* %register) + %4 = extractvalue %Range %3, 0 + %5 = extractvalue %Range %3, 1 + %6 = extractvalue %Range %3, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %7 = icmp sgt i64 %5, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %idxQubit = phi i64 [ %4, %preheader__1 ], [ %19, %exiting__1 ] + %8 = icmp sle i64 %idxQubit, %6 + %9 = icmp sge i64 %idxQubit, %6 + %10 = select i1 %7, i1 %8, i1 %9 + br i1 %10, label 
%body__1, label %exit__1 + +body__1: ; preds = %header__1 + %11 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %11) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %16 = bitcast %Tuple* %15 to { %Array*, %Qubit* }* + %17 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %16, i32 0, i32 0 + %18 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %16, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %17, align 8 + store %Qubit* %14, %Qubit** %18, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %15, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %11, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %19 = add i64 %idxQubit, %5 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* 
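+  ; [annotation added for readability; not part of the compiler-generated QIR]
+  ; Like the __1/__2 variants above, this adjoint wrapper unpacks the captured
+  ; { %Callable*, double } tuple, re-tuples the rotation angle with the incoming
+  ; %Qubit*, copies the captured callable, flips it to its adjoint with
+  ; __quantum__rt__callable_make_adjoint, invokes it, and then releases the
+  ; temporary tuple and callable reference counts.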
+ %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Qubit* }* + %10 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Qubit* }* }* getelementptr ({ %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Qubit* }* %9, { double, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__3__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Qubit* }* + %10 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Qubit* }* }* getelementptr ({ %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Qubit* }* %9, { double, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) + +define internal void @Microsoft__Quantum__Samples__QAOA__ApplyDriverHamiltonian__ctladj(%Array* %__controlQubits__, { double, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr 
inbounds { double, %Array* }, { double, %Array* }* %0, i32 0, i32 0 + %time = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Array* }, { double, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__Rx__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = fmul double -2.000000e+00, %time + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, double }* getelementptr ({ %Callable*, double }, { %Callable*, double }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, double }* + %7 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %6, i32 0, i32 1 + store %Callable* %3, %Callable** %7, align 8 + store double %4, double* %8, align 8 + %9 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @PartialApplication__4__FunctionTable, [2 x void (%Tuple*, i32)*]* @MemoryManagement__1__FunctionTable, %Tuple* %5) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { %Callable*, %Array* }* + %12 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %11, i32 0, i32 1 + store %Callable* %9, %Callable** %12, align 8 + store %Array* %target, %Array** %13, align 8 + call void @Microsoft__Quantum__Canon___8e6f761afcaa48b3b346030d61881540_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Array* }* %11) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %9, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %target, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %10, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___8e6f761afcaa48b3b346030d61881540_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %3 = call %Range @Microsoft__Quantum__Arrays___5da6d4c7ac3c4cc8b467e7839782288c_IndexRange__body(%Array* %register) + %4 = extractvalue %Range %3, 0 + %5 = 
extractvalue %Range %3, 1 + %6 = extractvalue %Range %3, 2 + %7 = sub i64 %6, %4 + %8 = sdiv i64 %7, %5 + %9 = mul i64 %5, %8 + %10 = add i64 %4, %9 + %11 = sub i64 0, %5 + %12 = insertvalue %Range zeroinitializer, i64 %10, 0 + %13 = insertvalue %Range %12, i64 %11, 1 + %14 = insertvalue %Range %13, i64 %4, 2 + %15 = extractvalue %Range %14, 0 + %16 = extractvalue %Range %14, 1 + %17 = extractvalue %Range %14, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %18 = icmp sgt i64 %16, 0 + br label %header__1 + +header__1: ; preds = %exiting__1, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %15, %preheader__1 ], [ %30, %exiting__1 ] + %19 = icmp sle i64 %__qsVar0__idxQubit__, %17 + %20 = icmp sge i64 %__qsVar0__idxQubit__, %17 + %21 = select i1 %18, i1 %19, i1 %20 + br i1 %21, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %22 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %22) + call void @__quantum__rt__callable_make_controlled(%Callable* %22) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %23 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %24 = bitcast i8* %23 to %Qubit** + %25 = load %Qubit*, %Qubit** %24, align 8 + %26 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Qubit* }* getelementptr ({ %Array*, %Qubit* }, { %Array*, %Qubit* }* null, i32 1) to i64)) + %27 = bitcast %Tuple* %26 to { %Array*, %Qubit* }* + %28 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %27, i32 0, i32 0 + %29 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %27, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %28, align 8 + store %Qubit* %25, %Qubit** %29, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %22, %Tuple* %26, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %26, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %30 = add i64 %__qsVar0__idxQubit__, %16 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, 
%Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %11, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %1 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 1 + %2 = load double, double* %1, align 8 + %3 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %4 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %3, i32 0, i32 0 + %5 = load %Qubit*, %Qubit** %4, align 8 + %6 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %7 = bitcast %Tuple* %6 to { double, %Qubit* }* + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + store double %2, double* %8, align 8 + store %Qubit* %5, %Qubit** %9, align 8 + %10 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %0, i32 0, i32 0 + %11 = load %Callable*, %Callable** %10, align 8 + %12 = call %Callable* @__quantum__rt__callable_copy(%Callable* %11, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %12) + call void @__quantum__rt__callable_invoke(%Callable* %12, %Tuple* %6, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %6, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %12, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %12, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Qubit* }* + %10 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 
= call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Qubit* }* }* getelementptr ({ %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Qubit* }* %9, { double, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Lifted__PartialApplication__4__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + %5 = bitcast %Tuple* %capture-tuple to { %Callable*, double }* + %6 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 1 + %7 = load double, double* %6, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %9 = bitcast %Tuple* %8 to { double, %Qubit* }* + %10 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 0 + %11 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %9, i32 0, i32 1 + store double %7, double* %10, align 8 + store %Qubit* %4, %Qubit** %11, align 8 + %12 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { double, %Qubit* }* }* getelementptr ({ %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* null, i32 1) to i64)) + %13 = bitcast %Tuple* %12 to { %Array*, { double, %Qubit* }* }* + %14 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 0 + %15 = getelementptr inbounds { %Array*, { double, %Qubit* }* }, { %Array*, { double, %Qubit* }* }* %13, i32 0, i32 1 + store %Array* %3, %Array** %14, align 8 + store { double, %Qubit* }* %9, { double, %Qubit* }** %15, align 8 + %16 = getelementptr inbounds { %Callable*, double }, { %Callable*, double }* %5, i32 0, i32 0 + %17 = load %Callable*, %Callable** %16, align 8 + %18 = call %Callable* @__quantum__rt__callable_copy(%Callable* %17, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 1) + call void 
@__quantum__rt__callable_make_adjoint(%Callable* %18) + call void @__quantum__rt__callable_make_controlled(%Callable* %18) + call void @__quantum__rt__callable_invoke(%Callable* %18, %Tuple* %12, %Tuple* %result-tuple) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %12, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %18, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %18, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Samples__QAOA__ApplyInstanceHamiltonian__body(i64 %numSegments, double %time, %Array* %weights, %Array* %coupling, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %weights, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %coupling, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %auxiliary = call %Qubit* @__quantum__rt__qubit_allocate() + %0 = call %Array* @Microsoft__Quantum__Arrays___601af6c46d6c4dfc9a1117e4934f40cf_Zipped__body(%Array* %weights, %Array* %target) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %12, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %3) + %6 = bitcast i8* %5 to { double, %Qubit* }** + %7 = load { double, %Qubit* }*, { double, %Qubit* }** %6, align 8 + %8 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 0 + %h = load double, double* %8, align 8 + %9 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %7, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %9, align 8 + %10 = fmul double 2.000000e+00, %time + %11 = fmul double %10, %h + call void @Microsoft__Quantum__Intrinsic__Rz__body(double %11, %Qubit* %qubit) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %12 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %i = phi i64 [ 0, %exit__1 ], [ %15, %exiting__2 ] + %13 = icmp sle i64 %i, 5 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = add i64 %i, 1 + br label %header__3 + +exiting__2: ; preds = %exit__3 + %15 = add i64 %i, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %16 = sub i64 %1, 1 + br label %header__4 + +header__3: ; preds = %exiting__3, %body__2 + %j = phi i64 [ %14, %body__2 ], [ %37, %exiting__3 ] + %17 = icmp sle i64 %j, 5 + br i1 %17, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %target, i64 %i) + %19 = bitcast i8* %18 to %Qubit** + %20 = load %Qubit*, %Qubit** %19, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %20, %Qubit* %auxiliary) + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %target, i64 %j) + %22 = bitcast i8* %21 to %Qubit** + %23 = load %Qubit*, %Qubit** %22, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %23, %Qubit* %auxiliary) + %24 = fmul double 2.000000e+00, %time + %25 = mul i64 %numSegments, %i + %26 = add i64 %25, %j + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %coupling, i64 %26) + %28 = bitcast i8* %27 to 
double* + %29 = load double, double* %28, align 8 + %30 = fmul double %24, %29 + call void @Microsoft__Quantum__Intrinsic__Rz__body(double %30, %Qubit* %auxiliary) + %31 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %target, i64 %j) + %32 = bitcast i8* %31 to %Qubit** + %33 = load %Qubit*, %Qubit** %32, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %33, %Qubit* %auxiliary) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %target, i64 %i) + %35 = bitcast i8* %34 to %Qubit** + %36 = load %Qubit*, %Qubit** %35, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %36, %Qubit* %auxiliary) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %37 = add i64 %j, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + br label %exiting__2 + +header__4: ; preds = %exiting__4, %exit__2 + %38 = phi i64 [ 0, %exit__2 ], [ %44, %exiting__4 ] + %39 = icmp sle i64 %38, %16 + br i1 %39, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %40 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %38) + %41 = bitcast i8* %40 to { double, %Qubit* }** + %42 = load { double, %Qubit* }*, { double, %Qubit* }** %41, align 8 + %43 = bitcast { double, %Qubit* }* %42 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %43, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %44 = add i64 %38, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %auxiliary) + call void @__quantum__rt__array_update_alias_count(%Array* %weights, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %coupling, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + ret void +} + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__rt__qubit_release(%Qubit*) + +define internal %Array* @Microsoft__Quantum__Arrays___601af6c46d6c4dfc9a1117e4934f40cf_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %2 = icmp slt i64 %0, %1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = icmp eq i64 %nElements, 0 + br i1 %3, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %4 + +continue__1: ; preds = %condContinue__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %6 = bitcast i8* %5 to double* + %7 = load double, double* %6, align 8 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = 
call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { double, %Qubit* }* + %13 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %12, i32 0, i32 1 + store double %7, double* %13, align 8 + store %Qubit* %10, %Qubit** %14, align 8 + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %16 = sub i64 %nElements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %17 = phi i64 [ 0, %continue__1 ], [ %21, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %17) + %20 = bitcast i8* %19 to { double, %Qubit* }** + store { double, %Qubit* }* %12, { double, %Qubit* }** %20, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %15, %Array** %output, align 8 + %22 = sub i64 %nElements, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %23) + %26 = bitcast i8* %25 to { double, %Qubit* }** + %27 = load { double, %Qubit* }*, { double, %Qubit* }** %26, align 8 + %28 = bitcast { double, %Qubit* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %30 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %48, %exiting__3 ] + %31 = icmp sle i64 %idxElement, %30 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %32 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %35 = bitcast i8* %34 to double* + %36 = load double, double* %35, align 8 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %38 = bitcast i8* %37 to %Qubit** + %39 = load %Qubit*, %Qubit** %38, align 8 + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, %Qubit* }* getelementptr ({ double, %Qubit* }, { double, %Qubit* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { double, %Qubit* }* + %42 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %41, i32 0, i32 1 + store double %36, double* %42, align 8 + store %Qubit* %39, %Qubit** %43, align 8 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxElement) + %45 = bitcast i8* %44 to { double, %Qubit* }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, 
i32 1) + %46 = load { double, %Qubit* }*, { double, %Qubit* }** %45, align 8 + %47 = bitcast { double, %Qubit* }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { double, %Qubit* }* %41, { double, %Qubit* }** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { double, %Qubit* }** + %56 = load { double, %Qubit* }*, { double, %Qubit* }** %55, align 8 + %57 = bitcast { double, %Qubit* }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret %Array* %49 +} + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +define internal void @Microsoft__Quantum__Intrinsic__Rz__body(double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__r__body(i2 -2, double %theta, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { +entry: + %__controlQubits__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__controlQubits__, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, %Qubit** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal double @Microsoft__Quantum__Samples__QAOA__CalculatedCost__body(%Array* %segmentCosts, %Array* %usedSegments) { +entry: + %finalCost = alloca double, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %segmentCosts, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %usedSegments, i32 1) + store double 0.000000e+00, double* %finalCost, align 8 + %0 = call %Array* 
@Microsoft__Quantum__Arrays___e8e95760bf164410939f3fcd14e4a719_Zipped__body(%Array* %segmentCosts, %Array* %usedSegments) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %0) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %13, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %3) + %6 = bitcast i8* %5 to { double, i1 }** + %7 = load { double, i1 }*, { double, i1 }** %6, align 8 + %8 = getelementptr inbounds { double, i1 }, { double, i1 }* %7, i32 0, i32 0 + %cost = load double, double* %8, align 8 + %9 = getelementptr inbounds { double, i1 }, { double, i1 }* %7, i32 0, i32 1 + %segment = load i1, i1* %9, align 1 + %10 = load double, double* %finalCost, align 8 + %11 = select i1 %segment, double %cost, double 0.000000e+00 + %12 = fadd double %10, %11 + store double %12, double* %finalCost, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %13 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %14 = load double, double* %finalCost, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %segmentCosts, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %usedSegments, i32 -1) + %15 = sub i64 %1, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %16 = phi i64 [ 0, %exit__1 ], [ %22, %exiting__2 ] + %17 = icmp sle i64 %16, %15 + br i1 %17, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %16) + %19 = bitcast i8* %18 to { double, i1 }** + %20 = load { double, i1 }*, { double, i1 }** %19, align 8 + %21 = bitcast { double, i1 }* %20 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %21, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %22 = add i64 %16, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + ret double %14 +} + +define internal %Array* @Microsoft__Quantum__Arrays___e8e95760bf164410939f3fcd14e4a719_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %2 = icmp slt i64 %0, %1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = icmp eq i64 %nElements, 0 + br i1 %3, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %4 + +continue__1: ; preds = %condContinue__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %6 = bitcast i8* %5 to double* + %7 = load double, double* %6, align 8 + %8 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %9 = bitcast i8* %8 to i1* + %10 = load i1, i1* %9, align 1 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i1 }* getelementptr ({ double, i1 }, { double, i1 }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { double, i1 }* + %13 = getelementptr inbounds { double, i1 }, { double, i1 }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { double, i1 }, { double, i1 }* %12, i32 0, i32 1 + store double %7, double* %13, align 8 + store i1 %10, i1* %14, align 1 + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %16 = sub i64 %nElements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %17 = phi i64 [ 0, %continue__1 ], [ %21, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %17) + %20 = bitcast i8* %19 to { double, i1 }** + store { double, i1 }* %12, { double, i1 }** %20, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %15, %Array** %output, align 8 + %22 = sub i64 %nElements, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %23) + %26 = bitcast i8* %25 to { double, i1 }** + %27 = load { double, i1 }*, { double, i1 }** %26, align 8 + %28 = bitcast { double, i1 }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %30 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %48, %exiting__3 ] + %31 = icmp sle i64 %idxElement, %30 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %32 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %35 = bitcast i8* %34 to double* + %36 = load double, double* %35, align 8 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %38 = bitcast i8* %37 to i1* + %39 = load i1, i1* %38, align 1 + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, i1 }* getelementptr ({ double, i1 }, { double, i1 }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { double, i1 }* + %42 = getelementptr inbounds { double, i1 }, { double, i1 }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { double, i1 }, { double, i1 }* %41, i32 0, i32 1 + store double %36, double* %42, align 8 + store i1 %39, i1* %43, align 1 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxElement) + %45 = bitcast i8* %44 to { double, i1 }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %46 = load 
{ double, i1 }*, { double, i1 }** %45, align 8 + %47 = bitcast { double, i1 }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { double, i1 }* %41, { double, i1 }** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { double, i1 }** + %56 = load { double, i1 }*, { double, i1 }** %55, align 8 + %57 = bitcast { double, i1 }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret %Array* %49 +} + +define internal %Array* @Microsoft__Quantum__Samples__QAOA__HamiltonianCouplings__body(double %penalty, i64 %numSegments) { +entry: + %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([80 x i8], [80 x i8]* @0, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactI__body(i64 %numSegments, i64 6, %String* %0) + %1 = mul i64 %numSegments, %numSegments + %2 = fmul double 2.000000e+00, %penalty + %3 = call %Array* @Microsoft__Quantum__Arrays___9c580c668ba942d6ad733aafc0ca3cba_ConstantArray__body(i64 %1, double %2) + %4 = call %Array* @__quantum__rt__array_copy(%Array* %3, i1 false) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 2) + %6 = bitcast i8* %5 to double* + store double %penalty, double* %6, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + %7 = call %Array* @__quantum__rt__array_copy(%Array* %4, i1 false) + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %7, i64 9) + %9 = bitcast i8* %8 to double* + store double %penalty, double* %9, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + %10 = call %Array* @__quantum__rt__array_copy(%Array* %7, i1 false) + %11 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 29) + %12 = bitcast i8* %11 to double* + store double %penalty, double* %12, align 8 + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %7, i32 -1) + ret %Array* %10 +} + +define internal void @Microsoft__Quantum__Diagnostics__EqualityFactI__body(i64 %actual, i64 %expected, %String* %message) { +entry: + %0 = icmp ne i64 %actual, %expected + br i1 %0, label %then0__1, 
label %continue__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Diagnostics___1f44bb58994a4427b591e8ff1435ee54___QsRef0__FormattedFailure____body(i64 %actual, i64 %expected, %String* %message) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +declare %String* @__quantum__rt__string_create(i8*) + +define internal %Array* @Microsoft__Quantum__Arrays___9c580c668ba942d6ad733aafc0ca3cba_ConstantArray__body(i64 %length, double %value) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %1 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2) + %5 = bitcast i8* %4 to double* + store double %value, double* %5, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + ret %Array* %0 +} + +declare %Array* @__quantum__rt__array_copy(%Array*, i1) + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) + +define internal %Array* @Microsoft__Quantum__Samples__QAOA__HamiltonianWeights__body(%Array* %segmentCosts, double %penalty, i64 %numSegments) { +entry: + %weights = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %segmentCosts, i32 1) + %0 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %numSegments) + %1 = sub i64 %numSegments, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %2) + %5 = bitcast i8* %4 to double* + store double 0.000000e+00, double* %5, align 8 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %0, %Array** %weights, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %0, i32 1) + %7 = sub i64 %numSegments, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %i = phi i64 [ 0, %exit__1 ], [ %19, %exiting__2 ] + %8 = icmp sle i64 %i, %7 + br i1 %8, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %9 = load %Array*, %Array** %weights, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %9, i32 -1) + %10 = call %Array* @__quantum__rt__array_copy(%Array* %9, i1 false) + %11 = fmul double 4.000000e+00, %penalty + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %segmentCosts, i64 %i) + %13 = bitcast i8* %12 to double* + %14 = load double, double* %13, align 8 + %15 = fmul double 5.000000e-01, %14 + %16 = fsub double %11, %15 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %10, i64 %i) + %18 = bitcast i8* %17 to double* + store double %16, double* %18, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %10, i32 1) + store %Array* %10, %Array** %weights, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %19 = add i64 %i, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %20 = load %Array*, %Array** %weights, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %segmentCosts, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1) + ret %Array* %20 +} + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +define internal i1 @Microsoft__Quantum__Samples__QAOA__IsSatisfactory__body(i64 %numSegments, %Array* %usedSegments) { +entry: + %hammingWeight = alloca i64, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %usedSegments, i32 1) + %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([68 x i8], [68 x i8]* @1, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactI__body(i64 %numSegments, i64 6, %String* %0) + store i64 0, i64* %hammingWeight, align 4 + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %usedSegments) + %2 = sub i64 %1, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %3 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %4 = icmp sle i64 %3, %2 + br i1 %4, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %usedSegments, i64 %3) + %6 = bitcast i8* %5 to i1* + %segment = load i1, i1* %6, align 1 + %7 = load i64, i64* %hammingWeight, align 4 + %8 = select i1 %segment, i64 1, i64 0 + %9 = add i64 %7, %8 + store i64 %9, i64* %hammingWeight, align 4 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %3, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %11 = load i64, i64* %hammingWeight, align 4 + %12 = icmp ne i64 %11, 4 + br i1 %12, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %exit__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %usedSegments, i64 0) + %14 = bitcast i8* %13 to i1* + %15 = load i1, i1* %14, align 1 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %usedSegments, i64 2) + %17 = bitcast i8* %16 to i1* + %18 = load i1, i1* %17, align 1 + %19 = icmp ne i1 %15, %18 + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %exit__1 + %20 = phi i1 [ %12, %exit__1 ], [ %19, %condFalse__1 ] + br i1 %20, label %condContinue__2, label %condFalse__2 + +condFalse__2: ; preds = %condContinue__1 + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %usedSegments, i64 1) + %22 = bitcast i8* %21 to i1* + %23 = load i1, i1* %22, align 1 + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %usedSegments, i64 3) + %25 = bitcast i8* %24 to i1* + %26 = load i1, i1* %25, align 1 + %27 = icmp ne i1 %23, %26 + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condContinue__1 + %28 = phi i1 [ %20, %condContinue__1 ], [ %27, %condFalse__2 ] + br i1 %28, label %condContinue__3, label %condFalse__3 + +condFalse__3: ; preds = %condContinue__2 + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %usedSegments, i64 4) + %30 = bitcast i8* %29 to i1* + %31 = load i1, i1* %30, align 1 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %usedSegments, i64 5) + %33 = bitcast i8* %32 to i1* + %34 = load i1, i1* %33, align 1 + %35 = icmp ne i1 %31, %34 + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condContinue__2 + %36 = phi i1 [ %28, %condContinue__2 ], [ %35, %condFalse__3 ] + br i1 %36, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__3 + call void @__quantum__rt__array_update_alias_count(%Array* %usedSegments, i32 -1) + call void 
@__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret i1 false + +continue__1: ; preds = %condContinue__3 + call void @__quantum__rt__array_update_alias_count(%Array* %usedSegments, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret i1 true +} + +define internal %Array* @Microsoft__Quantum__Samples__QAOA__PerformQAOA__body(i64 %numSegments, %Array* %weights, %Array* %couplings, %Array* %timeX, %Array* %timeZ) { +entry: + %result = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %weights, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %couplings, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %timeX, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %timeZ, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %timeX) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %timeZ) + %2 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([40 x i8], [40 x i8]* @2, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__EqualityFactI__body(i64 %0, i64 %1, %String* %2) + %3 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %numSegments) + %4 = sub i64 %numSegments, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %9, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 %5) + %8 = bitcast i8* %7 to i1* + store i1 false, i1* %8, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %9 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %3, %Array** %result, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 1) + %x = call %Array* @__quantum__rt__qubit_allocate_array(i64 %numSegments) + call void @__quantum__rt__array_update_alias_count(%Array* %x, i32 1) + %10 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__H__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + call void @Microsoft__Quantum__Canon___07ad636ba1b24e25922107572495b968_ApplyToEach__body(%Callable* %10, %Array* %x) + %11 = call %Array* @Microsoft__Quantum__Arrays___c6e35ad819d44fb09cceb07d141b5271_Zipped__body(%Array* %timeZ, %Array* %timeX) + %12 = call i64 @__quantum__rt__array_get_size_1d(%Array* %11) + %13 = sub i64 %12, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %14 = phi i64 [ 0, %exit__1 ], [ %21, %exiting__2 ] + %15 = icmp sle i64 %14, %13 + br i1 %15, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %11, i64 %14) + %17 = bitcast i8* %16 to { double, double }** + %18 = load { double, double }*, { double, double }** %17, align 8 + %19 = getelementptr inbounds { double, double }, { double, double }* %18, i32 0, i32 0 + %tz = load double, double* %19, align 8 + %20 = getelementptr inbounds { double, double }, { double, double }* %18, i32 0, i32 1 + %tx = load double, double* %20, align 8 + call void @Microsoft__Quantum__Samples__QAOA__ApplyInstanceHamiltonian__body(i64 %numSegments, double %tz, %Array* %weights, %Array* %couplings, %Array* %x) + call void @Microsoft__Quantum__Samples__QAOA__ApplyDriverHamiltonian__body(double %tx, %Array* %x) + br label %exiting__2 + 
+exiting__2: ; preds = %body__2 + %21 = add i64 %14, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %22 = call %Array* @Microsoft__Quantum__Measurement__MultiM__body(%Array* %x) + %23 = call %Array* @Microsoft__Quantum__Convert__ResultArrayAsBoolArray__body(%Array* %22) + call void @__quantum__rt__array_update_alias_count(%Array* %x, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %weights, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %couplings, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %timeX, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %timeZ, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %3, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %10, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %10, i32 -1) + %24 = sub i64 %12, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %25 = phi i64 [ 0, %exit__2 ], [ %31, %exiting__3 ] + %26 = icmp sle i64 %25, %24 + br i1 %26, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %11, i64 %25) + %28 = bitcast i8* %27 to { double, double }** + %29 = load { double, double }*, { double, double }** %28, align 8 + %30 = bitcast { double, double }* %29 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %30, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %31 = add i64 %25, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + %32 = call i64 @__quantum__rt__array_get_size_1d(%Array* %22) + %33 = sub i64 %32, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %34 = phi i64 [ 0, %exit__3 ], [ %39, %exiting__4 ] + %35 = icmp sle i64 %34, %33 + br i1 %35, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %36 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %22, i64 %34) + %37 = bitcast i8* %36 to %Result** + %38 = load %Result*, %Result** %37, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %38, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %39 = add i64 %34, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_reference_count(%Array* %22, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %x) + ret %Array* %23 +} + +declare void @__quantum__rt__qubit_release_array(%Array*) + +define internal void @Microsoft__Quantum__Canon___07ad636ba1b24e25922107572495b968_ApplyToEach__body(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call %Range @Microsoft__Quantum__Arrays___5da6d4c7ac3c4cc8b467e7839782288c_IndexRange__body(%Array* %register) + %1 = extractvalue %Range %0, 0 + %2 = extractvalue %Range %0, 1 + %3 = extractvalue %Range %0, 2 + br label %preheader__1 + +preheader__1: ; preds = %entry + %4 = icmp sgt i64 %2, 0 + br label %header__1 + +header__1: ; 
preds = %exiting__1, %preheader__1 + %idxQubit = phi i64 [ %1, %preheader__1 ], [ %14, %exiting__1 ] + %5 = icmp sle i64 %idxQubit, %3 + %6 = icmp sge i64 %idxQubit, %3 + %7 = select i1 %4, i1 %5, i1 %6 + br i1 %7, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { %Qubit* }* + %13 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %12, i32 0, i32 0 + store %Qubit* %10, %Qubit** %13, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %singleElementOperation, %Tuple* %11, %Tuple* null) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %14 = add i64 %idxQubit, %2 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + call void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %2) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, %Qubit* }* + %1 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, %Qubit* }, { %Array*, %Qubit* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %3, %Qubit* %4) + ret void +} + +define internal %Array* @Microsoft__Quantum__Arrays___c6e35ad819d44fb09cceb07d141b5271_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void 
@__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %2 = icmp slt i64 %0, %1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = icmp eq i64 %nElements, 0 + br i1 %3, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %4 + +continue__1: ; preds = %condContinue__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %6 = bitcast i8* %5 to double* + %7 = load double, double* %6, align 8 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %9 = bitcast i8* %8 to double* + %10 = load double, double* %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { double, double }* + %13 = getelementptr inbounds { double, double }, { double, double }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { double, double }, { double, double }* %12, i32 0, i32 1 + store double %7, double* %13, align 8 + store double %10, double* %14, align 8 + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %16 = sub i64 %nElements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %17 = phi i64 [ 0, %continue__1 ], [ %21, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %17) + %20 = bitcast i8* %19 to { double, double }** + store { double, double }* %12, { double, double }** %20, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %15, %Array** %output, align 8 + %22 = sub i64 %nElements, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %23) + %26 = bitcast i8* %25 to { double, double }** + %27 = load { double, double }*, { double, double }** %26, align 8 + %28 = bitcast { double, double }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %30 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %48, %exiting__3 ] + %31 = icmp sle i64 %idxElement, %30 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = 
%header__3 + %32 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %35 = bitcast i8* %34 to double* + %36 = load double, double* %35, align 8 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %38 = bitcast i8* %37 to double* + %39 = load double, double* %38, align 8 + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ double, double }* getelementptr ({ double, double }, { double, double }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { double, double }* + %42 = getelementptr inbounds { double, double }, { double, double }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { double, double }, { double, double }* %41, i32 0, i32 1 + store double %36, double* %42, align 8 + store double %39, double* %43, align 8 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxElement) + %45 = bitcast i8* %44 to { double, double }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %46 = load { double, double }*, { double, double }** %45, align 8 + %47 = bitcast { double, double }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { double, double }* %41, { double, double }** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { double, double }** + %56 = load { double, double }*, { double, double }** %55, align 8 + %57 = bitcast { double, double }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret %Array* %49 +} + +define internal %Array* @Microsoft__Quantum__Convert__ResultArrayAsBoolArray__body(%Array* %input) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Convert__ResultAsBool__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___79fbf3b2b4ff4028aa1e67ffa2332bc3_Mapped__body(%Callable* %0, %Array* 
%input) + call void @__quantum__rt__array_update_alias_count(%Array* %input, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + ret %Array* %1 +} + +define internal %Array* @Microsoft__Quantum__Measurement__MultiM__body(%Array* %targets) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %targets, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Intrinsic__M__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___f7ee1a4096d24362a9c98c3cb2ab8f16_ForEach__body(%Callable* %0, %Array* %targets) + call void @__quantum__rt__array_update_alias_count(%Array* %targets, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + ret %Array* %1 +} + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) + +define internal void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__h__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__h__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Samples__QAOA__RunQAOATrials__body() { +entry: + %successNumber = alloca i64, align 8 + %bestItinerary = alloca %Array*, align 8 + %bestCost = alloca double, align 8 + %segmentCosts = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 6) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %segmentCosts, i64 0) + %1 = bitcast i8* %0 to double* + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %segmentCosts, i64 1) + %3 = bitcast i8* %2 to double* + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %segmentCosts, i64 2) + %5 = bitcast i8* %4 to double* + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %segmentCosts, i64 3) + %7 = bitcast i8* %6 to double* + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %segmentCosts, i64 4) + %9 = bitcast i8* %8 to double* + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %segmentCosts, i64 5) + %11 = bitcast i8* %10 to double* + store double 4.700000e+00, double* %1, align 8 + store double 0x40222E147AE147AE, double* %3, align 8 + store double 9.030000e+00, double* %5, align 8 + store double 5.700000e+00, double* %7, align 8 + store double 0x40200A3D70A3D70A, double* %9, align 8 + store double 1.710000e+00, double* %11, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %segmentCosts, i32 1) + %timeX = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 5) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %timeX, i64 0) + %13 = bitcast i8* %12 to double* + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %timeX, i64 1) + %15 = bitcast i8* %14 to double* + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %timeX, i64 2) + %17 = bitcast i8* %16 to double* + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %timeX, i64 3) + %19 = bitcast i8* %18 to double* + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %timeX, i64 4) + %21 = bitcast i8* %20 to double* + store double 6.191930e-01, double* %13, align 8 + store double 7.425660e-01, double* %15, align 8 + store double 6.003500e-02, double* %17, align 8 + store double 0xBFF91A708EDE54B5, double* %19, align 8 + store double 4.549000e-02, double* %21, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %timeX, i32 1) + %timeZ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 5) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %timeZ, i64 0) + %23 = bitcast i8* %22 to double* + %24 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %timeZ, i64 1) + %25 = bitcast i8* %24 to double* + %26 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %timeZ, i64 2) + %27 = bitcast i8* %26 to double* + %28 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %timeZ, i64 3) + %29 = bitcast i8* %28 to double* + %30 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %timeZ, i64 4) + %31 = bitcast i8* %30 to double* + store double 0x40097526D8B1DD5D, double* %23, align 8 + store double 0xBFF239873FFAC1D3, double* %25, align 8 + store double 2.210820e-01, double* %27, align 8 + store double 5.377530e-01, double* %29, align 8 + store double -4.172220e-01, double* %31, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %timeZ, i32 1) + store double 2.000000e+03, double* %bestCost, align 8 + %32 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 5) + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 0) + %34 = bitcast i8* %33 to i1* + %35 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 1) + %36 = bitcast i8* %35 to i1* + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 2) + %38 = bitcast i8* %37 to i1* + %39 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 3) + %40 = bitcast i8* %39 to i1* + %41 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %32, i64 4) + %42 = bitcast i8* %41 to i1* + store i1 false, i1* %34, align 1 + store i1 false, i1* %36, align 1 + store i1 false, i1* %38, align 1 + store i1 false, i1* %40, align 1 + store i1 false, i1* %42, align 1 + store %Array* %32, %Array** %bestItinerary, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 1) + store i64 0, i64* %successNumber, align 4 + %weights = call %Array* @Microsoft__Quantum__Samples__QAOA__HamiltonianWeights__body(%Array* %segmentCosts, double 2.000000e+01, i64 6) + call void @__quantum__rt__array_update_alias_count(%Array* %weights, i32 1) + %couplings = call %Array* @Microsoft__Quantum__Samples__QAOA__HamiltonianCouplings__body(double 2.000000e+01, i64 6) + call void @__quantum__rt__array_update_alias_count(%Array* %couplings, i32 1) + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %trial = phi 
i64 [ 1, %entry ], [ %49, %exiting__1 ] + %43 = icmp sle i64 %trial, 20 + br i1 %43, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %result = call %Array* @Microsoft__Quantum__Samples__QAOA__PerformQAOA__body(i64 6, %Array* %weights, %Array* %couplings, %Array* %timeX, %Array* %timeZ) + call void @__quantum__rt__array_update_alias_count(%Array* %result, i32 1) + %cost = call double @Microsoft__Quantum__Samples__QAOA__CalculatedCost__body(%Array* %segmentCosts, %Array* %result) + %sat = call i1 @Microsoft__Quantum__Samples__QAOA__IsSatisfactory__body(i64 6, %Array* %result) + %44 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @3, i32 0, i32 0)) + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @4, i32 0, i32 0)) + %46 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @5, i32 0, i32 0)) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 1) + %47 = call i64 @__quantum__rt__array_get_size_1d(%Array* %result) + %48 = sub i64 %47, 1 + br label %header__2 + +exiting__1: ; preds = %continue__1 + %49 = add i64 %trial, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + %50 = load i64, i64* %successNumber, align 4 + %51 = sitofp i64 %50 to double + %52 = fmul double %51, 1.000000e+02 + %runPercentage = fdiv double %52, 2.000000e+01 + %53 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([24 x i8], [24 x i8]* @11, i32 0, i32 0)) + call void @__quantum__rt__message(%String* %53) + %54 = load %Array*, %Array** %bestItinerary, align 8 + %55 = load double, double* %bestCost, align 8 + %56 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([23 x i8], [23 x i8]* @12, i32 0, i32 0)) + %57 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @4, i32 0, i32 0)) + %58 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @5, i32 0, i32 0)) + call void @__quantum__rt__string_update_reference_count(%String* %58, i32 1) + %59 = call i64 @__quantum__rt__array_get_size_1d(%Array* %54) + %60 = sub i64 %59, 1 + br label %header__3 + +header__2: ; preds = %exiting__2, %body__1 + %61 = phi %String* [ %46, %body__1 ], [ %73, %exiting__2 ] + %62 = phi i64 [ 0, %body__1 ], [ %74, %exiting__2 ] + %63 = icmp sle i64 %62, %48 + br i1 %63, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %64 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %result, i64 %62) + %65 = bitcast i8* %64 to i1* + %66 = load i1, i1* %65, align 1 + %67 = icmp ne %String* %61, %46 + br i1 %67, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %body__2 + %68 = call %String* @__quantum__rt__string_concatenate(%String* %61, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %61, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %body__2 + %69 = phi %String* [ %68, %condTrue__1 ], [ %61, %body__2 ] + br i1 %66, label %condTrue__2, label %condFalse__1 + +condTrue__2: ; preds = %condContinue__1 + %70 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @6, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__1: ; preds = %condContinue__1 + %71 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @7, i32 0, i32 0)) + br label %condContinue__2 + 
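+; trial-printout string formatting: the phi in %72 below selects the constant string at @6 or @7 according to the segment flag %66 loaded from the result array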
+condContinue__2: ; preds = %condFalse__1, %condTrue__2 + %72 = phi %String* [ %70, %condTrue__2 ], [ %71, %condFalse__1 ] + %73 = call %String* @__quantum__rt__string_concatenate(%String* %69, %String* %72) + call void @__quantum__rt__string_update_reference_count(%String* %69, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %72, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %condContinue__2 + %74 = add i64 %62, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %75 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @8, i32 0, i32 0)) + %76 = call %String* @__quantum__rt__string_concatenate(%String* %61, %String* %75) + call void @__quantum__rt__string_update_reference_count(%String* %61, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %75, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + %77 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %76) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %76, i32 -1) + %78 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @9, i32 0, i32 0)) + %79 = call %String* @__quantum__rt__string_concatenate(%String* %77, %String* %78) + call void @__quantum__rt__string_update_reference_count(%String* %77, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %78, i32 -1) + %80 = call %String* @__quantum__rt__double_to_string(double %cost) + %81 = call %String* @__quantum__rt__string_concatenate(%String* %79, %String* %80) + call void @__quantum__rt__string_update_reference_count(%String* %79, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %80, i32 -1) + %82 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([18 x i8], [18 x i8]* @10, i32 0, i32 0)) + %83 = call %String* @__quantum__rt__string_concatenate(%String* %81, %String* %82) + call void @__quantum__rt__string_update_reference_count(%String* %81, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %82, i32 -1) + br i1 %sat, label %condTrue__3, label %condFalse__2 + +condTrue__3: ; preds = %exit__2 + %84 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @6, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__2: ; preds = %exit__2 + %85 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @7, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__2, %condTrue__3 + %86 = phi %String* [ %84, %condTrue__3 ], [ %85, %condFalse__2 ] + %87 = call %String* @__quantum__rt__string_concatenate(%String* %83, %String* %86) + call void @__quantum__rt__string_update_reference_count(%String* %83, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %86, i32 -1) + call void @__quantum__rt__message(%String* %87) + br i1 %sat, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__3 + %88 = load double, double* %bestCost, align 8 + %89 = fsub double %88, 0x3EB0C6F7A0B5ED8D + %90 = fcmp olt double %cost, %89 + br i1 %90, label %then0__2, label %test1__1 + +then0__2: ; preds = %then0__1 + store double %cost, double* %bestCost, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %result, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %result, i32 1) + %91 = load %Array*, %Array** %bestItinerary, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %91, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %91, i32 -1) + store %Array* %result, %Array** %bestItinerary, align 8 + store i64 1, i64* %successNumber, align 4 + br label %continue__2 + +test1__1: ; preds = %then0__1 + %92 = load double, double* %bestCost, align 8 + %93 = fsub double %cost, %92 + %94 = call double @Microsoft__Quantum__Math__AbsD__body(double %93) + %95 = fcmp olt double %94, 0x3EB0C6F7A0B5ED8D + br i1 %95, label %then1__1, label %continue__2 + +then1__1: ; preds = %test1__1 + %96 = load i64, i64* %successNumber, align 4 + %97 = add i64 %96, 1 + store i64 %97, i64* %successNumber, align 4 + br label %continue__2 + +continue__2: ; preds = %then1__1, %test1__1, %then0__2 + br label %continue__1 + +continue__1: ; preds = %continue__2, %condContinue__3 + call void @__quantum__rt__array_update_alias_count(%Array* %result, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %result, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %87, i32 -1) + br label %exiting__1 + +header__3: ; preds = %exiting__3, %exit__1 + %98 = phi %String* [ %58, %exit__1 ], [ %110, %exiting__3 ] + %99 = phi i64 [ 0, %exit__1 ], [ %111, %exiting__3 ] + %100 = icmp sle i64 %99, %60 + br i1 %100, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %101 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %54, i64 %99) + %102 = bitcast i8* %101 to i1* + %103 = load i1, i1* %102, align 1 + %104 = icmp ne %String* %98, %58 + br i1 %104, label %condTrue__4, label %condContinue__4 + +condTrue__4: ; preds = %body__3 + %105 = call %String* @__quantum__rt__string_concatenate(%String* %98, %String* %57) + call void @__quantum__rt__string_update_reference_count(%String* %98, i32 -1) + br label %condContinue__4 + +condContinue__4: ; preds = %condTrue__4, %body__3 + %106 = phi %String* [ %105, %condTrue__4 ], [ %98, %body__3 ] + br i1 %103, label %condTrue__5, label %condFalse__3 + +condTrue__5: ; preds = %condContinue__4 + %107 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @6, i32 0, i32 0)) + br label %condContinue__5 + +condFalse__3: ; preds = %condContinue__4 + %108 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @7, i32 0, i32 0)) + br label %condContinue__5 + +condContinue__5: ; preds = %condFalse__3, %condTrue__5 + %109 = phi %String* [ %107, %condTrue__5 ], [ %108, %condFalse__3 ] + %110 = call %String* @__quantum__rt__string_concatenate(%String* %106, %String* %109) + call void @__quantum__rt__string_update_reference_count(%String* %106, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %109, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %condContinue__5 + %111 = add i64 %99, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %112 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @8, i32 0, i32 0)) + %113 = call %String* @__quantum__rt__string_concatenate(%String* %98, %String* %112) + call void @__quantum__rt__string_update_reference_count(%String* %98, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %112, i32 -1) + call void 
@__quantum__rt__string_update_reference_count(%String* %57, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %58, i32 -1) + %114 = call %String* @__quantum__rt__string_concatenate(%String* %56, %String* %113) + call void @__quantum__rt__string_update_reference_count(%String* %56, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %113, i32 -1) + %115 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([10 x i8], [10 x i8]* @9, i32 0, i32 0)) + %116 = call %String* @__quantum__rt__string_concatenate(%String* %114, %String* %115) + call void @__quantum__rt__string_update_reference_count(%String* %114, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %115, i32 -1) + %117 = call %String* @__quantum__rt__double_to_string(double %55) + %118 = call %String* @__quantum__rt__string_concatenate(%String* %116, %String* %117) + call void @__quantum__rt__string_update_reference_count(%String* %116, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %117, i32 -1) + call void @__quantum__rt__message(%String* %118) + %119 = call %String* @__quantum__rt__double_to_string(double %runPercentage) + %120 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @13, i32 0, i32 0)) + %121 = call %String* @__quantum__rt__string_concatenate(%String* %119, %String* %120) + call void @__quantum__rt__string_update_reference_count(%String* %119, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %120, i32 -1) + call void @__quantum__rt__message(%String* %121) + call void @__quantum__rt__array_update_alias_count(%Array* %segmentCosts, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %timeX, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %timeZ, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %54, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %weights, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %couplings, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %segmentCosts, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %timeX, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %timeZ, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %weights, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %couplings, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %53, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %118, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %121, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %54, i32 -1) + ret void +} + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) + +declare %String* @__quantum__rt__double_to_string(double) + +declare void @__quantum__rt__message(%String*) + +define internal double @Microsoft__Quantum__Math__AbsD__body(double %a) { +entry: + %0 = fcmp olt double %a, 0.000000e+00 + br i1 %0, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %1 = fneg double %a + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %2 = phi double [ %1, %condTrue__1 ], [ %a, %condFalse__1 ] + ret double %2 +} + +define internal %Range 
@Microsoft__Quantum__Arrays___5da6d4c7ac3c4cc8b467e7839782288c_IndexRange__body(%Array* %array) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + %2 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %1, 2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %2 +} + +define internal %Array* @Microsoft__Quantum__Arrays___f7ee1a4096d24362a9c98c3cb2ab8f16_ForEach__body(%Callable* %action, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = icmp eq i64 %length, 0 + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %1 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %1 + +continue__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %4 = load %Qubit*, %Qubit** %3, align 8 + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Qubit* }* + %7 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %6, i32 0, i32 0 + store %Qubit* %4, %Qubit** %7, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Result* }* getelementptr ({ %Result* }, { %Result* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %action, %Tuple* %5, %Tuple* %8) + %9 = bitcast %Tuple* %8 to { %Result* }* + %10 = getelementptr inbounds { %Result* }, { %Result* }* %9, i32 0, i32 0 + %first = load %Result*, %Result** %10, align 8 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %length) + %12 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %13 = phi i64 [ 0, %continue__1 ], [ %17, %exiting__1 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %11, i64 %13) + %16 = bitcast i8* %15 to %Result** + store %Result* %first, %Result** %16, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %first, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %13, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %11, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %18 = sub i64 %length, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idx = phi i64 [ 1, %exit__1 ], [ %35, %exiting__2 ] + %19 = icmp sle i64 %idx, %18 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1) + %21 = call %Array* @__quantum__rt__array_copy(%Array* %20, i1 false) + %22 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %23 = bitcast i8* %22 to %Qubit** + %24 = load %Qubit*, %Qubit** %23, align 8 + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit* }* getelementptr ({ %Qubit* }, { %Qubit* }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { %Qubit* }* + %27 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %26, i32 0, i32 0 + store %Qubit* %24, %Qubit** %27, align 8 + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Result* }* getelementptr ({ %Result* }, { %Result* }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %action, %Tuple* %25, %Tuple* %28) + %29 = bitcast %Tuple* %28 to { %Result* }* + %30 = getelementptr inbounds { %Result* }, { %Result* }* %29, i32 0, i32 0 + %31 = load %Result*, %Result** %30, align 8 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %idx) + %33 = bitcast i8* %32 to %Result** + call void @__quantum__rt__result_update_reference_count(%Result* %31, i32 1) + %34 = load %Result*, %Result** %33, align 8 + call void @__quantum__rt__result_update_reference_count(%Result* %34, i32 -1) + store %Result* %31, %Result** %33, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 1) + store %Array* %21, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %31, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %35 = add i64 %idx, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %36 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %action, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %action, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %36, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %first, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret %Array* %36 +} + +define internal %Array* @Microsoft__Quantum__Arrays___79fbf3b2b4ff4028aa1e67ffa2332bc3_Mapped__body(%Callable* %mapper, %Array* %array) { +entry: + %retval = alloca %Array*, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %length = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %0 = icmp eq i64 %length, 0 + br i1 %0, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %1 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 0) + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Array* %1 + +continue__1: ; preds = %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 0) + %3 = bitcast i8* %2 to %Result** + %4 = load %Result*, %Result** %3, align 8 + %5 = call 
%Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Result* }* getelementptr ({ %Result* }, { %Result* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Result* }* + %7 = getelementptr inbounds { %Result* }, { %Result* }* %6, i32 0, i32 0 + store %Result* %4, %Result** %7, align 8 + %8 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1 }* getelementptr ({ i1 }, { i1 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %5, %Tuple* %8) + %9 = bitcast %Tuple* %8 to { i1 }* + %10 = getelementptr inbounds { i1 }, { i1 }* %9, i32 0, i32 0 + %first = load i1, i1* %10, align 1 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 %length) + %12 = sub i64 %length, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %13 = phi i64 [ 0, %continue__1 ], [ %17, %exiting__1 ] + %14 = icmp sle i64 %13, %12 + br i1 %14, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %11, i64 %13) + %16 = bitcast i8* %15 to i1* + store i1 %first, i1* %16, align 1 + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %13, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %11, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %11, i32 1) + %18 = sub i64 %length, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %idx = phi i64 [ 1, %exit__1 ], [ %35, %exiting__2 ] + %19 = icmp sle i64 %idx, %18 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %20, i32 -1) + %21 = call %Array* @__quantum__rt__array_copy(%Array* %20, i1 false) + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %idx) + %23 = bitcast i8* %22 to %Result** + %24 = load %Result*, %Result** %23, align 8 + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Result* }* getelementptr ({ %Result* }, { %Result* }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { %Result* }* + %27 = getelementptr inbounds { %Result* }, { %Result* }* %26, i32 0, i32 0 + store %Result* %24, %Result** %27, align 8 + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1 }* getelementptr ({ i1 }, { i1 }* null, i32 1) to i64)) + call void @__quantum__rt__callable_invoke(%Callable* %mapper, %Tuple* %25, %Tuple* %28) + %29 = bitcast %Tuple* %28 to { i1 }* + %30 = getelementptr inbounds { i1 }, { i1 }* %29, i32 0, i32 0 + %31 = load i1, i1* %30, align 1 + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %21, i64 %idx) + %33 = bitcast i8* %32 to i1* + %34 = load i1, i1* %33, align 1 + store i1 %31, i1* %33, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %21, i32 1) + store %Array* %21, %Array** %retval, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %20, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %28, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %35 = add i64 %idx, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + %36 = load %Array*, %Array** %retval, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %mapper, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %mapper, i32 -1) + call 
void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %36, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %8, i32 -1) + ret %Array* %36 +} + +define internal void @Microsoft__Quantum__Diagnostics___1f44bb58994a4427b591e8ff1435ee54___QsRef0__FormattedFailure____body(i64 %actual, i64 %expected, %String* %message) { +entry: + %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @14, i32 0, i32 0)) + %1 = call %String* @__quantum__rt__string_concatenate(%String* %0, %String* %message) + %2 = call %String* @__quantum__rt__string_concatenate(%String* %1, %String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %1, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([13 x i8], [13 x i8]* @15, i32 0, i32 0)) + %4 = call %String* @__quantum__rt__string_concatenate(%String* %2, %String* %3) + call void @__quantum__rt__string_update_reference_count(%String* %2, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + %5 = call %String* @__quantum__rt__int_to_string(i64 %expected) + %6 = call %String* @__quantum__rt__string_concatenate(%String* %4, %String* %5) + call void @__quantum__rt__string_update_reference_count(%String* %4, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %5, i32 -1) + %7 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([11 x i8], [11 x i8]* @16, i32 0, i32 0)) + %8 = call %String* @__quantum__rt__string_concatenate(%String* %6, %String* %7) + call void @__quantum__rt__string_update_reference_count(%String* %6, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + %9 = call %String* @__quantum__rt__int_to_string(i64 %actual) + %10 = call %String* @__quantum__rt__string_concatenate(%String* %8, %String* %9) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1) + call void @__quantum__rt__fail(%String* %10) + unreachable +} + +declare %String* @__quantum__rt__int_to_string(i64) + +declare void @__quantum__rt__fail(%String*) + +define internal void @Microsoft__Quantum__Convert__ResultAsBool__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Result* }* + %1 = getelementptr inbounds { %Result* }, { %Result* }* %0, i32 0, i32 0 + %2 = load %Result*, %Result** %1, align 8 + %3 = call i1 @Microsoft__Quantum__Convert__ResultAsBool__body(%Result* %2) + %4 = bitcast %Tuple* %result-tuple to { i1 }* + %5 = getelementptr inbounds { i1 }, { i1 }* %4, i32 0, i32 0 + store i1 %3, i1* %5, align 1 + ret void +} + +define internal i1 @Microsoft__Quantum__Convert__ResultAsBool__body(%Result* %input) { +entry: + %0 = call %Result* @__quantum__rt__result_get_zero() + %1 = call i1 @__quantum__rt__result_equal(%Result* %input, %Result* %0) + %2 = select i1 %1, i1 false, i1 true + ret i1 %2 +} + +declare %Result* @__quantum__rt__result_get_zero() + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +define internal void @Microsoft__Quantum__Intrinsic__M__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { 
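+; editor's note (annotation, not part of the generated output): this wrapper adapts
+; M__body to the uniform %Callable convention used by ForEach above: it unpacks the
+; %Qubit* from %arg-tuple, performs the measurement, and stores the resulting
+; %Result* into %result-tuple.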
+entry: + %0 = bitcast %Tuple* %arg-tuple to { %Qubit* }* + %1 = getelementptr inbounds { %Qubit* }, { %Qubit* }* %0, i32 0, i32 0 + %2 = load %Qubit*, %Qubit** %1, align 8 + %3 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %2) + %4 = bitcast %Tuple* %result-tuple to { %Result* }* + %5 = getelementptr inbounds { %Result* }, { %Result* }* %4, i32 0, i32 0 + store %Result* %3, %Result** %5, align 8 + ret void +} + +define internal %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { +entry: + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 -2, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %3 = bitcast i8* %2 to %Qubit** + store %Qubit* %qubit, %Qubit** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %4 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + ret %Result* %4 +} + +declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %control, %Qubit** %5, align 8 + %__controlQubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %__controlQubits__, %Array* %3) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__1, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load 
%Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* + %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 + store %Qubit* %control, %Qubit** %5, align 8 + store %Qubit* %target, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +declare void @__quantum__qis__h__body(%Qubit*) + +declare void @__quantum__qis__h__ctl(%Array*, %Qubit*) + +declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) + +define internal %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret %Result* %0 +} + +define internal void @Microsoft__Quantum__Intrinsic__R__body(i2 %pauli, double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__r__body(i2 %pauli, double %theta, %Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__r__body(i2, double, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__R__adj(i2 %pauli, double %theta, %Qubit* %qubit) { +entry: + call void @__quantum__qis__r__adj(i2 %pauli, double %theta, %Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__r__adj(i2, double, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__R__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 2 + %qubit = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { i2, double, %Qubit* }* + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 2 + store i2 %pauli, i2* %6, align 1 + store double %theta, double* %7, align 8 + store %Qubit* %qubit, %Qubit** %8, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, 
%Qubit* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +declare void @__quantum__qis__r__ctl(%Array*, { i2, double, %Qubit* }*) + +define internal void @Microsoft__Quantum__Intrinsic__R__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 1 + %theta = load double, double* %2, align 8 + %3 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %0, i32 0, i32 2 + %qubit = load %Qubit*, %Qubit** %3, align 8 + %4 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %5 = bitcast %Tuple* %4 to { i2, double, %Qubit* }* + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 0 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 1 + %8 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %5, i32 0, i32 2 + store i2 %pauli, i2* %6, align 1 + store double %theta, double* %7, align 8 + store %Qubit* %qubit, %Qubit** %8, align 8 + call void @__quantum__qis__r__ctladj(%Array* %__controlQubits__, { i2, double, %Qubit* }* %5) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %4, i32 -1) + ret void +} + +declare void @__quantum__qis__r__ctladj(%Array*, { i2, double, %Qubit* }*) + +define internal void @Microsoft__Quantum__Intrinsic__Rz__adj(double %theta, %Qubit* %qubit) { +entry: + %theta__1 = fneg double %theta + call void @__quantum__qis__r__body(i2 -2, double %theta__1, %Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rz__ctl(%Array* %__controlQubits__, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i2, double, %Qubit* }* + %5 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 2 + store i2 -2, i2* %5, align 1 + store double %theta, double* %6, align 8 + store %Qubit* %qubit, %Qubit** %7, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void 
@__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Rz__ctladj(%Array* %__controlQubits__, { double, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 0 + %theta = load double, double* %1, align 8 + %2 = getelementptr inbounds { double, %Qubit* }, { double, %Qubit* }* %0, i32 0, i32 1 + %qubit = load %Qubit*, %Qubit** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %theta__1 = fneg double %theta + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, double, %Qubit* }* getelementptr ({ i2, double, %Qubit* }, { i2, double, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { i2, double, %Qubit* }* + %5 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 1 + %7 = getelementptr inbounds { i2, double, %Qubit* }, { i2, double, %Qubit* }* %4, i32 0, i32 2 + store i2 -2, i2* %5, align 1 + store double %theta__1, double* %6, align 8 + store %Qubit* %qubit, %Qubit** %7, align 8 + call void @__quantum__qis__r__ctl(%Array* %__controlQubits__, { i2, double, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__x__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define void @Microsoft__Quantum__Samples__QAOA__RunQAOATrials__Interop() #0 { +entry: + call void @Microsoft__Quantum__Samples__QAOA__RunQAOATrials__body() + ret void +} + +define void @Microsoft__Quantum__Samples__QAOA__RunQAOATrials() #1 { +entry: + call void @Microsoft__Quantum__Samples__QAOA__RunQAOATrials__body() + %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @17, i32 0, i32 0)) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +attributes #0 = { "InteropFriendly" } 
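+; editor's note (annotation, not part of the generated output): attribute group #0
+; ("InteropFriendly", above) marks the C-callable interop shim, while #1
+; ("EntryPoint", below) marks the function a QIR runner discovers and executes.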
+attributes #1 = { "EntryPoint" } diff --git a/src/munchkin/tests/qsharp/qir_projects.sln b/src/munchkin/tests/qsharp/qir_projects.sln new file mode 100644 index 0000000..7a06771 --- /dev/null +++ b/src/munchkin/tests/qsharp/qir_projects.sln @@ -0,0 +1,73 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.5.33530.505 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "VQE", "VQE\VQE.csproj", "{34BD611C-D991-481C-B31B-4B0700835529}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "oracle-generator", "oracle-generator\oracle-generator.csproj", "{73329BD7-5006-46EF-B9A0-7CDE9316F2DD}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "simplified-oracle-generator", "simplified-oracle-generator\simplified-oracle-generator.csproj", "{6E51DA7D-1FF6-4A6E-80EA-502A752CDA29}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "bit-flip-code", "bit-flip-code\bit-flip-code.csproj", "{FD488FA5-0549-4DB7-A39D-3708141D9D1F}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "hydrogen-sim", "hydrogen-sim\hydrogen-sim.csproj", "{FC8A7F51-05DF-4410-AF00-C1037A4245B7}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "parallel-half-moons", "parallel-half-moons\parallel-half-moons.csproj", "{10684ED0-AFF3-44AC-9474-775E68EDBAB7}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "repeat-until", "repeat-until\repeat-until.csproj", "{97F38BD6-89E4-428C-83E8-7B56125CAF7E}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "QAOA", "qaoa\QAOA.csproj", "{F02EFB3C-7549-4622-8A2E-D77697FA23DB}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "minified-oracle-generator", "minified-oracle-generator\minified-oracle-generator.csproj", "{F292619B-E87E-481B-AB21-FFEC0BA51DCC}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {34BD611C-D991-481C-B31B-4B0700835529}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {34BD611C-D991-481C-B31B-4B0700835529}.Debug|Any CPU.Build.0 = Debug|Any CPU + {34BD611C-D991-481C-B31B-4B0700835529}.Release|Any CPU.ActiveCfg = Release|Any CPU + {34BD611C-D991-481C-B31B-4B0700835529}.Release|Any CPU.Build.0 = Release|Any CPU + {73329BD7-5006-46EF-B9A0-7CDE9316F2DD}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {73329BD7-5006-46EF-B9A0-7CDE9316F2DD}.Debug|Any CPU.Build.0 = Debug|Any CPU + {73329BD7-5006-46EF-B9A0-7CDE9316F2DD}.Release|Any CPU.ActiveCfg = Release|Any CPU + {73329BD7-5006-46EF-B9A0-7CDE9316F2DD}.Release|Any CPU.Build.0 = Release|Any CPU + {6E51DA7D-1FF6-4A6E-80EA-502A752CDA29}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {6E51DA7D-1FF6-4A6E-80EA-502A752CDA29}.Debug|Any CPU.Build.0 = Debug|Any CPU + {6E51DA7D-1FF6-4A6E-80EA-502A752CDA29}.Release|Any CPU.ActiveCfg = Release|Any CPU + {6E51DA7D-1FF6-4A6E-80EA-502A752CDA29}.Release|Any CPU.Build.0 = Release|Any CPU + {FD488FA5-0549-4DB7-A39D-3708141D9D1F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {FD488FA5-0549-4DB7-A39D-3708141D9D1F}.Debug|Any CPU.Build.0 = Debug|Any CPU + {FD488FA5-0549-4DB7-A39D-3708141D9D1F}.Release|Any CPU.ActiveCfg = Release|Any CPU + {FD488FA5-0549-4DB7-A39D-3708141D9D1F}.Release|Any CPU.Build.0 = Release|Any CPU + {FC8A7F51-05DF-4410-AF00-C1037A4245B7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + 
{FC8A7F51-05DF-4410-AF00-C1037A4245B7}.Debug|Any CPU.Build.0 = Debug|Any CPU + {FC8A7F51-05DF-4410-AF00-C1037A4245B7}.Release|Any CPU.ActiveCfg = Release|Any CPU + {FC8A7F51-05DF-4410-AF00-C1037A4245B7}.Release|Any CPU.Build.0 = Release|Any CPU + {10684ED0-AFF3-44AC-9474-775E68EDBAB7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {10684ED0-AFF3-44AC-9474-775E68EDBAB7}.Debug|Any CPU.Build.0 = Debug|Any CPU + {10684ED0-AFF3-44AC-9474-775E68EDBAB7}.Release|Any CPU.ActiveCfg = Release|Any CPU + {10684ED0-AFF3-44AC-9474-775E68EDBAB7}.Release|Any CPU.Build.0 = Release|Any CPU + {97F38BD6-89E4-428C-83E8-7B56125CAF7E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {97F38BD6-89E4-428C-83E8-7B56125CAF7E}.Debug|Any CPU.Build.0 = Debug|Any CPU + {97F38BD6-89E4-428C-83E8-7B56125CAF7E}.Release|Any CPU.ActiveCfg = Release|Any CPU + {97F38BD6-89E4-428C-83E8-7B56125CAF7E}.Release|Any CPU.Build.0 = Release|Any CPU + {F02EFB3C-7549-4622-8A2E-D77697FA23DB}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {F02EFB3C-7549-4622-8A2E-D77697FA23DB}.Debug|Any CPU.Build.0 = Debug|Any CPU + {F02EFB3C-7549-4622-8A2E-D77697FA23DB}.Release|Any CPU.ActiveCfg = Release|Any CPU + {F02EFB3C-7549-4622-8A2E-D77697FA23DB}.Release|Any CPU.Build.0 = Release|Any CPU + {F292619B-E87E-481B-AB21-FFEC0BA51DCC}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {F292619B-E87E-481B-AB21-FFEC0BA51DCC}.Debug|Any CPU.Build.0 = Debug|Any CPU + {F292619B-E87E-481B-AB21-FFEC0BA51DCC}.Release|Any CPU.ActiveCfg = Release|Any CPU + {F292619B-E87E-481B-AB21-FFEC0BA51DCC}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {158BD026-189E-4A08-9A56-CEAA357DCDFB} + EndGlobalSection +EndGlobal diff --git a/src/munchkin/tests/qsharp/repeat-until/Program.qs b/src/munchkin/tests/qsharp/repeat-until/Program.qs new file mode 100644 index 0000000..5492f0b --- /dev/null +++ b/src/munchkin/tests/qsharp/repeat-until/Program.qs @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. + +namespace Microsoft.Quantum.Samples.RepeatUntilSuccess { + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Math; + open Microsoft.Quantum.Preparation; + open Microsoft.Quantum.Diagnostics; + + /// # Summary + /// Example of a Repeat-until-success algorithm implementing a circuit + /// decomposition by Paetznick & Svore. + /// # References + /// - [ *Adam Paetznick, Krysta M. Svore*, + /// Quantum Information & Computation 14(15 & 16): 1277-1301 (2014) + /// ](https://arxiv.org/abs/1311.1074) + /// + /// # Input + /// ## gate + /// Gate circuit to run ("simple" or "V") + /// ## inputBasis + /// Pauli basis in which to prepare input qubit + /// ## inputValue + /// Boolean value for input qubit (true maps to One, false maps to Zero) + /// ## limit + /// Integer limit to number of repeats of circuit + /// ## numRuns + /// Number of times to run the circuit + /// + /// # Remarks + /// The program executes a circuit on a "target" qubit using an "auxiliary" + /// qubit. + /// The goal is to measure Zero for the auxiliary qubit. + /// If this succeeds, the program will have effectively applied an + /// (I + i√2X)/√3 gate on the target qubit. + /// If this fails, the program reruns the circuit up to `limit` times. 
+ @EntryPoint() + operation RunProgram( + gate: String, + inputValue : Bool, + inputBasis : Pauli, + limit : Int, + numRuns : Int + ) + : Unit { + if (gate != "simple" and gate != "V") { + Message($"Gate '{gate}' is invalid. Please specify a valid gate. Options are: 'simple' or 'V'."); + } else { + for n in 0 .. numRuns - 1 { + if (gate == "simple") { + let (success, result, numIter) = CreateQubitsAndApplySimpleGate( + inputValue, inputBasis, limit + ); + Message($"({success}, {result}, {numIter})"); + } elif (gate == "V") { + let (success, result, numIter) = CreateQubitsAndApplyRzArcTan2( + inputValue, inputBasis, limit + ); + Message($"({success}, {result}, {numIter})"); + } + } + } + } +} \ No newline at end of file diff --git a/src/munchkin/tests/qsharp/repeat-until/libLLVM.dll b/src/munchkin/tests/qsharp/repeat-until/libLLVM.dll new file mode 100644 index 0000000..e10836a Binary files /dev/null and b/src/munchkin/tests/qsharp/repeat-until/libLLVM.dll differ diff --git a/src/munchkin/tests/qsharp/repeat-until/qir/repeat-until.ll b/src/munchkin/tests/qsharp/repeat-until/qir/repeat-until.ll new file mode 100644 index 0000000..f80c668 --- /dev/null +++ b/src/munchkin/tests/qsharp/repeat-until/qir/repeat-until.ll @@ -0,0 +1,2337 @@ + +%Tuple = type opaque +%Result = type opaque +%Qubit = type opaque +%Array = type opaque +%String = type opaque +%Callable = type opaque +%Range = type { i64, i64, i64 } + +@0 = internal constant [37 x i8] c"Auxiliary qubit is not in |+\E2\9F\A9 stat\00" +@1 = internal constant [36 x i8] c"Resource qubit is not in |+\E2\9F\A9 stat\00" +@2 = internal constant [37 x i8] c"Auxiliary qubit is not in |0\E2\9F\A9 stat\00" +@3 = internal constant [17 x i8] c"Qubit is not in \00" +@4 = internal constant [30 x i8] c" state for given input basis.\00" +@5 = internal constant [7 x i8] c"simple\00" +@6 = internal constant [2 x i8] c"V\00" +@7 = internal constant [7 x i8] c"Gate '\00" +@8 = internal constant [2 x i8] c"\22\00" +@9 = internal constant [73 x i8] c"' is invalid. Please specify a valid gate. 
Options are: 'simple' or 'V'.\00" +@10 = internal constant [2 x i8] c"(\00" +@11 = internal constant [5 x i8] c"true\00" +@12 = internal constant [6 x i8] c"false\00" +@13 = internal constant [3 x i8] c", \00" +@14 = internal constant [2 x i8] c")\00" +@Microsoft__Quantum__Canon__ApplyP__FunctionTable = internal constant [4 x void (%Tuple*, %Tuple*, %Tuple*)*] [void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyP__body__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyP__adj__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyP__ctl__wrapper, void (%Tuple*, %Tuple*, %Tuple*)* @Microsoft__Quantum__Canon__ApplyP__ctladj__wrapper] + +define %Result* @Microsoft__Quantum__Samples__RepeatUntilSuccess__ApplyAndMeasurePart1__body(%Qubit* %auxiliary, %Qubit* %resource) { +entry: + call void @__quantum__qis__t__body(%Qubit* %auxiliary) + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %resource, %Qubit* %auxiliary) + call void @__quantum__qis__t__adj(%Qubit* %auxiliary) + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 1, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %3 = bitcast i8* %2 to %Qubit** + store %Qubit* %auxiliary, %Qubit** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %4 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + ret %Result* %4 +} + +declare void @__quantum__qis__t__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) { +entry: + %__controlQubits__ = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %__controlQubits__, i64 0) + %1 = bitcast i8* %0 to %Qubit** + store %Qubit* %control, %Qubit** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__t__adj(%Qubit*) + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) + +declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) + +define %Result* @Microsoft__Quantum__Samples__RepeatUntilSuccess__ApplyAndMeasurePart2__body(%Qubit* %resource, %Qubit* %target) { +entry: + call void @__quantum__qis__t__body(%Qubit* %target) + call void @__quantum__qis__z__body(%Qubit* %target) + call void 
@Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %target, %Qubit* %resource) + call void @__quantum__qis__t__body(%Qubit* %resource) + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 1, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %3 = bitcast i8* %2 to %Qubit** + store %Qubit* %resource, %Qubit** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %4 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + ret %Result* %4 +} + +declare void @__quantum__qis__z__body(%Qubit*) + +define { i1, i64 }* @Microsoft__Quantum__Samples__RepeatUntilSuccess__ApplyRzArcTan2__body(i2 %inputBasis, i1 %inputValue, i64 %limit, %Qubit* %auxiliary, %Qubit* %resource, %Qubit* %target) { +entry: + %numIter = alloca i64, align 8 + %success = alloca i1, align 1 + %done = alloca i1, align 1 + store i1 false, i1* %done, align 1 + store i1 false, i1* %success, align 1 + store i64 0, i64* %numIter, align 4 + br label %repeat__1 + +repeat__1: ; preds = %fixup__1, %entry + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to i2* + store i2 1, i2* %2, align 1 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %auxiliary, %Qubit** %5, align 8 + %6 = call %Result* @__quantum__rt__result_get_zero() + %7 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([37 x i8], [37 x i8]* @0, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %0, %Array* %3, %Result* %6, %String* %7) + %8 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %9 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %8, i64 0) + %10 = bitcast i8* %9 to i2* + store i2 1, i2* %10, align 1 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %11, i64 0) + %13 = bitcast i8* %12 to %Qubit** + store %Qubit* %resource, %Qubit** %13, align 8 + %14 = call %Result* @__quantum__rt__result_get_zero() + %15 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([36 x i8], [36 x i8]* @1, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %8, %Array* %11, %Result* %14, %String* %15) + call void @Microsoft__Quantum__Samples__RepeatUntilSuccess__AssertQubitIsInState__body(%Qubit* %target, i2 %inputBasis, i1 %inputValue) + %result1 = call %Result* @Microsoft__Quantum__Samples__RepeatUntilSuccess__ApplyAndMeasurePart1__body(%Qubit* %auxiliary, %Qubit* %resource) + %16 = call %Result* @__quantum__rt__result_get_zero() + %17 = call i1 @__quantum__rt__result_equal(%Result* %result1, %Result* %16) + 
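+; editor's note (annotation, not part of the generated output): the branch below is
+; the repeat-until-success step: a Zero outcome for %result1 proceeds to the part-2
+; measurement, while a One outcome takes the fixup path (a Z on the auxiliary qubit
+; and a reset/re-preparation of the resource qubit) before the loop repeats, bounded
+; by the caller-supplied %limit.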
br i1 %17, label %then0__1, label %else__1 + +then0__1: ; preds = %repeat__1 + %result2 = call %Result* @Microsoft__Quantum__Samples__RepeatUntilSuccess__ApplyAndMeasurePart2__body(%Qubit* %resource, %Qubit* %target) + %18 = call %Result* @__quantum__rt__result_get_zero() + %19 = call i1 @__quantum__rt__result_equal(%Result* %result2, %Result* %18) + br i1 %19, label %then0__2, label %else__2 + +then0__2: ; preds = %then0__1 + store i1 true, i1* %success, align 1 + br label %continue__2 + +else__2: ; preds = %then0__1 + call void @__quantum__qis__z__body(%Qubit* %resource) + call void @__quantum__qis__z__body(%Qubit* %target) + br label %continue__2 + +continue__2: ; preds = %else__2, %then0__2 + call void @__quantum__rt__result_update_reference_count(%Result* %result2, i32 -1) + br label %continue__1 + +else__1: ; preds = %repeat__1 + call void @__quantum__qis__z__body(%Qubit* %auxiliary) + call void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %resource) + call void @__quantum__qis__h__body(%Qubit* %resource) + br label %continue__1 + +continue__1: ; preds = %else__1, %continue__2 + %20 = load i1, i1* %success, align 1 + br i1 %20, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %continue__1 + %21 = load i64, i64* %numIter, align 4 + %22 = icmp sge i64 %21, %limit + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %continue__1 + %23 = phi i1 [ %20, %continue__1 ], [ %22, %condFalse__1 ] + store i1 %23, i1* %done, align 1 + %24 = load i64, i64* %numIter, align 4 + %25 = add i64 %24, 1 + store i64 %25, i64* %numIter, align 4 + br label %until__1 + +until__1: ; preds = %condContinue__1 + br i1 %23, label %rend__1, label %fixup__1 + +fixup__1: ; preds = %until__1 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result1, i32 -1) + br label %repeat__1 + +rend__1: ; preds = %until__1 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %8, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result1, i32 -1) + %26 = load i1, i1* %success, align 1 + %27 = load i64, i64* %numIter, align 4 + %28 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, i64 }* getelementptr ({ i1, i64 }, { i1, i64 }* null, i32 1) to i64)) + %29 = bitcast %Tuple* %28 to { i1, i64 }* + %30 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %29, i32 0, i32 0 + %31 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %29, i32 0, i32 1 + store i1 %26, i1* %30, align 1 + store i64 %27, i64* %31, align 4 + ret { i1, i64 }* %29 +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %bases, %Array* %qubits, %Result* %result, %String* 
%msg) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double 1.000000e+00, %String* %msg, double 1.000000e-10) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +declare %Result* @__quantum__rt__result_get_zero() + +declare %String* @__quantum__rt__string_create(i8*) + +define void @Microsoft__Quantum__Samples__RepeatUntilSuccess__AssertQubitIsInState__body(%Qubit* %target, i2 %inputBasis, i1 %inputValue) { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to i2* + store i2 %inputBasis, i2* %2, align 1 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %target, %Qubit** %5, align 8 + br i1 %inputValue, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + %6 = call %Result* @__quantum__rt__result_get_one() + call void @__quantum__rt__result_update_reference_count(%Result* %6, i32 1) + br label %condContinue__1 + +condFalse__1: ; preds = %entry + %7 = call %Result* @__quantum__rt__result_get_zero() + call void @__quantum__rt__result_update_reference_count(%Result* %7, i32 1) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %8 = phi %Result* [ %6, %condTrue__1 ], [ %7, %condFalse__1 ] + br i1 %inputValue, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condContinue__1 + %9 = call %Result* @__quantum__rt__result_get_one() + call void @__quantum__rt__result_update_reference_count(%Result* %9, i32 1) + br label %condContinue__2 + +condFalse__2: ; preds = %condContinue__1 + %10 = call %Result* @__quantum__rt__result_get_zero() + call void @__quantum__rt__result_update_reference_count(%Result* %10, i32 1) + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condTrue__2 + %11 = phi %Result* [ %9, %condTrue__2 ], [ %10, %condFalse__2 ] + %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([17 x i8], [17 x i8]* @3, i32 0, i32 0)) + %13 = call %String* @__quantum__rt__result_to_string(%Result* %11) + %14 = call %String* @__quantum__rt__string_concatenate(%String* %12, %String* %13) + call void @__quantum__rt__string_update_reference_count(%String* %12, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + %15 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([30 x i8], [30 x i8]* @4, i32 0, i32 0)) + %16 = call %String* @__quantum__rt__string_concatenate(%String* %14, %String* %15) + call void @__quantum__rt__string_update_reference_count(%String* %14, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + call void 
@Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %0, %Array* %3, %Result* %8, %String* %16) + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %8, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %11, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + ret void +} + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) + +define internal void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %qubit) { +entry: + %0 = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) + %1 = call %Result* @__quantum__rt__result_get_one() + %2 = call i1 @__quantum__rt__result_equal(%Result* %0, %Result* %1) + call void @__quantum__rt__result_update_reference_count(%Result* %0, i32 -1) + br i1 %2, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %qubit) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret void +} + +declare void @__quantum__qis__h__body(%Qubit*) + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) + +declare %Tuple* @__quantum__rt__tuple_create(i64) + +define { i1, i64 }* @Microsoft__Quantum__Samples__RepeatUntilSuccess__ApplySimpleGate__body(i2 %inputBasis, i1 %inputValue, i64 %limit, %Array* %register) { +entry: + %numIter = alloca i64, align 8 + %success = alloca i1, align 1 + %done = alloca i1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + store i1 false, i1* %done, align 1 + store i1 false, i1* %success, align 1 + store i64 0, i64* %numIter, align 4 + br i1 %inputValue, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 1) + %1 = bitcast i8* %0 to %Qubit** + %qubit = load %Qubit*, %Qubit** %1, align 8 + call void @__quantum__qis__x__body(%Qubit* %qubit) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 1) + %3 = bitcast i8* %2 to %Qubit** + %4 = load %Qubit*, %Qubit** %3, align 8 + call void @Microsoft__Quantum__Preparation__PreparePauliEigenstate__body(i2 %inputBasis, %Qubit* %4) + br label %repeat__1 + +repeat__1: ; preds = %fixup__1, %continue__1 + %5 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %5, i64 0) + %7 = bitcast i8* %6 to i2* + store i2 -2, i2* %7, align 1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %11, i64 0) + %13 = bitcast i8* %12 to %Qubit** + store %Qubit* %10, %Qubit** %13, align 8 + %14 = call %Result* @__quantum__rt__result_get_zero() + %15 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([37 x i8], [37 x i8]* @2, i32 0, i32 0)) + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %5, %Array* %11, %Result* %14, %String* %15) + %16 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 1) + 
%17 = bitcast i8* %16 to %Qubit** + %18 = load %Qubit*, %Qubit** %17, align 8 + call void @Microsoft__Quantum__Samples__RepeatUntilSuccess__AssertQubitIsInState__body(%Qubit* %18, i2 %inputBasis, i1 %inputValue) + call void @Microsoft__Quantum__Samples__RepeatUntilSuccess__ApplySimpleRUSCircuit__body(%Array* %register) + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 0) + %20 = bitcast i8* %19 to %Qubit** + %21 = load %Qubit*, %Qubit** %20, align 8 + %22 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %21) + %23 = call %Result* @__quantum__rt__result_get_zero() + %24 = call i1 @__quantum__rt__result_equal(%Result* %22, %Result* %23) + store i1 %24, i1* %success, align 1 + br i1 %24, label %condContinue__1, label %condFalse__1 + +condFalse__1: ; preds = %repeat__1 + %25 = load i64, i64* %numIter, align 4 + %26 = icmp sge i64 %25, %limit + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %repeat__1 + %27 = phi i1 [ %24, %repeat__1 ], [ %26, %condFalse__1 ] + store i1 %27, i1* %done, align 1 + %28 = load i64, i64* %numIter, align 4 + %29 = add i64 %28, 1 + store i64 %29, i64* %numIter, align 4 + br label %until__1 + +until__1: ; preds = %condContinue__1 + br i1 %27, label %rend__1, label %fixup__1 + +fixup__1: ; preds = %until__1 + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %22, i32 -1) + br label %repeat__1 + +rend__1: ; preds = %until__1 + call void @__quantum__rt__array_update_reference_count(%Array* %5, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %11, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %15, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %22, i32 -1) + %30 = load i1, i1* %success, align 1 + %31 = load i64, i64* %numIter, align 4 + %32 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, i64 }* getelementptr ({ i1, i64 }, { i1, i64 }* null, i32 1) to i64)) + %33 = bitcast %Tuple* %32 to { i1, i64 }* + %34 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %33, i32 0, i32 0 + %35 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %33, i32 0, i32 1 + store i1 %30, i1* %34, align 1 + store i64 %31, i64* %35, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret { i1, i64 }* %33 +} + +declare void @__quantum__qis__x__body(%Qubit*) + +define internal void @Microsoft__Quantum__Preparation__PreparePauliEigenstate__body(i2 %basis, %Qubit* %qubit) { +entry: + %0 = icmp eq i2 %basis, 0 + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @Microsoft__Quantum__Preparation__PrepareSingleQubitIdentity__body(%Qubit* %qubit) + br label %continue__1 + +test1__1: ; preds = %entry + %1 = icmp eq i2 %basis, 1 + br i1 %1, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__qis__h__body(%Qubit* %qubit) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %2 = icmp eq i2 %basis, -1 + br i1 %2, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__qis__h__body(%Qubit* %qubit) + call void @__quantum__qis__s__body(%Qubit* %qubit) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + ret 
void +} + +define void @Microsoft__Quantum__Samples__RepeatUntilSuccess__ApplySimpleRUSCircuit__body(%Array* %register) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 0) + %1 = bitcast i8* %0 to %Qubit** + %qubit = load %Qubit*, %Qubit** %1, align 8 + call void @__quantum__qis__h__body(%Qubit* %qubit) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 0) + %3 = bitcast i8* %2 to %Qubit** + %qubit__1 = load %Qubit*, %Qubit** %3, align 8 + call void @__quantum__qis__t__body(%Qubit* %qubit__1) + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 0) + %5 = bitcast i8* %4 to %Qubit** + %6 = load %Qubit*, %Qubit** %5, align 8 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 1) + %8 = bitcast i8* %7 to %Qubit** + %9 = load %Qubit*, %Qubit** %8, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %6, %Qubit* %9) + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 0) + %11 = bitcast i8* %10 to %Qubit** + %qubit__2 = load %Qubit*, %Qubit** %11, align 8 + call void @__quantum__qis__h__body(%Qubit* %qubit__2) + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 0) + %13 = bitcast i8* %12 to %Qubit** + %14 = load %Qubit*, %Qubit** %13, align 8 + %15 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 1) + %16 = bitcast i8* %15 to %Qubit** + %17 = load %Qubit*, %Qubit** %16, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %14, %Qubit* %17) + %18 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 0) + %19 = bitcast i8* %18 to %Qubit** + %qubit__3 = load %Qubit*, %Qubit** %19, align 8 + call void @__quantum__qis__t__body(%Qubit* %qubit__3) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 0) + %21 = bitcast i8* %20 to %Qubit** + %qubit__4 = load %Qubit*, %Qubit** %21, align 8 + call void @__quantum__qis__h__body(%Qubit* %qubit__4) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) { +entry: + %result = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %target) + %0 = call %Result* @__quantum__rt__result_get_one() + %1 = call i1 @__quantum__rt__result_equal(%Result* %result, %Result* %0) + br i1 %1, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret %Result* %result +} + +declare %Result* @__quantum__rt__result_get_one() + +declare %String* @__quantum__rt__result_to_string(%Result*) + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) + +define { i1, %Result*, i64 }* @Microsoft__Quantum__Samples__RepeatUntilSuccess__CreateQubitsAndApplyRzArcTan2__body(i1 %inputValue, i2 %inputBasis, i64 %limit) { +entry: + %auxiliary = call %Qubit* @__quantum__rt__qubit_allocate() + %resource = call %Qubit* @__quantum__rt__qubit_allocate() + %target = call %Qubit* @__quantum__rt__qubit_allocate() + call void @Microsoft__Quantum__Samples__RepeatUntilSuccess__InitializeQubits__body(i2 %inputBasis, i1 %inputValue, %Qubit* %auxiliary, %Qubit* %resource, %Qubit* %target) + %0 = call { i1, i64 }* 
@Microsoft__Quantum__Samples__RepeatUntilSuccess__ApplyRzArcTan2__body(i2 %inputBasis, i1 %inputValue, i64 %limit, %Qubit* %auxiliary, %Qubit* %resource, %Qubit* %target) + %1 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %0, i32 0, i32 0 + %success = load i1, i1* %1, align 1 + %2 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %0, i32 0, i32 1 + %numIter = load i64, i64* %2, align 4 + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %4 = bitcast i8* %3 to i2* + store i2 %inputBasis, i2* %4, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %6 = bitcast i8* %5 to %Qubit** + store %Qubit* %target, %Qubit** %6, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %result = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + %7 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 3) + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %7, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %7, i64 1) + %11 = bitcast i8* %10 to %Qubit** + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %7, i64 2) + %13 = bitcast i8* %12 to %Qubit** + store %Qubit* %target, %Qubit** %9, align 8 + store %Qubit* %resource, %Qubit** %11, align 8 + store %Qubit* %auxiliary, %Qubit** %13, align 8 + call void @Microsoft__Quantum__Intrinsic__ResetAll__body(%Array* %7) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + %14 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, %Result*, i64 }* getelementptr ({ i1, %Result*, i64 }, { i1, %Result*, i64 }* null, i32 1) to i64)) + %15 = bitcast %Tuple* %14 to { i1, %Result*, i64 }* + %16 = getelementptr inbounds { i1, %Result*, i64 }, { i1, %Result*, i64 }* %15, i32 0, i32 0 + %17 = getelementptr inbounds { i1, %Result*, i64 }, { i1, %Result*, i64 }* %15, i32 0, i32 1 + %18 = getelementptr inbounds { i1, %Result*, i64 }, { i1, %Result*, i64 }* %15, i32 0, i32 2 + store i1 %success, i1* %16, align 1 + store %Result* %result, %Result** %17, align 8 + store i64 %numIter, i64* %18, align 4 + %19 = bitcast { i1, i64 }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %19, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %7, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %target) + call void @__quantum__rt__qubit_release(%Qubit* %resource) + call void @__quantum__rt__qubit_release(%Qubit* %auxiliary) + ret { i1, %Result*, i64 }* %15 +} + +declare %Qubit* @__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__rt__qubit_release(%Qubit*) + +define void @Microsoft__Quantum__Samples__RepeatUntilSuccess__InitializeQubits__body(i2 %inputBasis, i1 %inputValue, %Qubit* %auxiliary, 
%Qubit* %resource, %Qubit* %target) { +entry: + call void @__quantum__qis__h__body(%Qubit* %auxiliary) + call void @__quantum__qis__h__body(%Qubit* %resource) + br i1 %inputValue, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + call void @Microsoft__Quantum__Preparation__PreparePauliEigenstate__body(i2 %inputBasis, %Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__ResetAll__body(%Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %qubits) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %6, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 %2) + %5 = bitcast i8* %4 to %Qubit** + %qubit = load %Qubit*, %Qubit** %5, align 8 + call void @Microsoft__Quantum__Intrinsic__Reset__body(%Qubit* %qubit) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %6 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +declare void @__quantum__rt__tuple_update_reference_count(%Tuple*, i32) + +define { i1, %Result*, i64 }* @Microsoft__Quantum__Samples__RepeatUntilSuccess__CreateQubitsAndApplySimpleGate__body(i1 %inputValue, i2 %inputBasis, i64 %limit) { +entry: + %register = call %Array* @__quantum__rt__qubit_allocate_array(i64 2) + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %0 = call { i1, i64 }* @Microsoft__Quantum__Samples__RepeatUntilSuccess__ApplySimpleGate__body(i2 %inputBasis, i1 %inputValue, i64 %limit, %Array* %register) + %1 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %0, i32 0, i32 0 + %success = load i1, i1* %1, align 1 + %2 = getelementptr inbounds { i1, i64 }, { i1, i64 }* %0, i32 0, i32 1 + %numIter = load i64, i64* %2, align 4 + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %4 = bitcast i8* %3 to i2* + store i2 %inputBasis, i2* %4, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 1) + %6 = bitcast i8* %5 to %Qubit** + %7 = load %Qubit*, %Qubit** %6, align 8 + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %9 = bitcast i8* %8 to %Qubit** + store %Qubit* %7, %Qubit** %9, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %result = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + %10 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i1, %Result*, i64 }* 
getelementptr ({ i1, %Result*, i64 }, { i1, %Result*, i64 }* null, i32 1) to i64)) + %11 = bitcast %Tuple* %10 to { i1, %Result*, i64 }* + %12 = getelementptr inbounds { i1, %Result*, i64 }, { i1, %Result*, i64 }* %11, i32 0, i32 0 + %13 = getelementptr inbounds { i1, %Result*, i64 }, { i1, %Result*, i64 }* %11, i32 0, i32 1 + %14 = getelementptr inbounds { i1, %Result*, i64 }, { i1, %Result*, i64 }* %11, i32 0, i32 2 + store i1 %success, i1* %12, align 1 + store %Result* %result, %Result** %13, align 8 + store i64 %numIter, i64* %14, align 4 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + %15 = bitcast { i1, i64 }* %0 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %15, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__qubit_release_array(%Array* %register) + ret { i1, %Result*, i64 }* %11 +} + +declare void @__quantum__rt__qubit_release_array(%Array*) + +define void @Microsoft__Quantum__Samples__RepeatUntilSuccess__RunProgram__body(%String* %gate, i1 %inputValue, i2 %inputBasis, i64 %limit, i64 %numRuns) { +entry: + %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @5, i32 0, i32 0)) + %1 = call i1 @__quantum__rt__string_equal(%String* %gate, %String* %0) + %2 = xor i1 %1, true + br i1 %2, label %condTrue__1, label %condContinue__1 + +condTrue__1: ; preds = %entry + %3 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @6, i32 0, i32 0)) + %4 = call i1 @__quantum__rt__string_equal(%String* %gate, %String* %3) + %5 = xor i1 %4, true + call void @__quantum__rt__string_update_reference_count(%String* %3, i32 -1) + br label %condContinue__1 + +condContinue__1: ; preds = %condTrue__1, %entry + %6 = phi i1 [ %5, %condTrue__1 ], [ %2, %entry ] + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + br i1 %6, label %then0__1, label %else__1 + +then0__1: ; preds = %condContinue__1 + %7 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @7, i32 0, i32 0)) + %8 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @8, i32 0, i32 0)) + %9 = call %String* @__quantum__rt__string_concatenate(%String* %8, %String* %gate) + %10 = call %String* @__quantum__rt__string_concatenate(%String* %9, %String* %8) + call void @__quantum__rt__string_update_reference_count(%String* %9, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %8, i32 -1) + %11 = call %String* @__quantum__rt__string_concatenate(%String* %7, %String* %10) + call void @__quantum__rt__string_update_reference_count(%String* %7, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %10, i32 -1) + %12 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([73 x i8], [73 x i8]* @9, i32 0, i32 0)) + %13 = call %String* @__quantum__rt__string_concatenate(%String* %11, %String* %12) + call void @__quantum__rt__string_update_reference_count(%String* %11, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %12, i32 -1) + call void @__quantum__rt__message(%String* %13) + call void @__quantum__rt__string_update_reference_count(%String* %13, i32 -1) + br label %continue__1 + +else__1: ; preds = %condContinue__1 + %14 = sub i64 %numRuns, 1 + br label %header__1 + +continue__1: ; preds = %exit__1, %then0__1 + ret void + +header__1: ; preds = 
%exiting__1, %else__1 + %n = phi i64 [ 0, %else__1 ], [ %60, %exiting__1 ] + %15 = icmp sle i64 %n, %14 + br i1 %15, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %16 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([7 x i8], [7 x i8]* @5, i32 0, i32 0)) + %17 = call i1 @__quantum__rt__string_equal(%String* %gate, %String* %16) + call void @__quantum__rt__string_update_reference_count(%String* %16, i32 -1) + br i1 %17, label %then0__2, label %test1__1 + +then0__2: ; preds = %body__1 + %18 = call { i1, %Result*, i64 }* @Microsoft__Quantum__Samples__RepeatUntilSuccess__CreateQubitsAndApplySimpleGate__body(i1 %inputValue, i2 %inputBasis, i64 %limit) + %19 = getelementptr inbounds { i1, %Result*, i64 }, { i1, %Result*, i64 }* %18, i32 0, i32 0 + %success = load i1, i1* %19, align 1 + %20 = getelementptr inbounds { i1, %Result*, i64 }, { i1, %Result*, i64 }* %18, i32 0, i32 1 + %result = load %Result*, %Result** %20, align 8 + %21 = getelementptr inbounds { i1, %Result*, i64 }, { i1, %Result*, i64 }* %18, i32 0, i32 2 + %numIter = load i64, i64* %21, align 4 + %22 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @10, i32 0, i32 0)) + br i1 %success, label %condTrue__2, label %condFalse__1 + +condTrue__2: ; preds = %then0__2 + %23 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @11, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__1: ; preds = %then0__2 + %24 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @12, i32 0, i32 0)) + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__1, %condTrue__2 + %25 = phi %String* [ %23, %condTrue__2 ], [ %24, %condFalse__1 ] + %26 = call %String* @__quantum__rt__string_concatenate(%String* %22, %String* %25) + call void @__quantum__rt__string_update_reference_count(%String* %22, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %25, i32 -1) + %27 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @13, i32 0, i32 0)) + %28 = call %String* @__quantum__rt__string_concatenate(%String* %26, %String* %27) + call void @__quantum__rt__string_update_reference_count(%String* %26, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %27, i32 -1) + %29 = call %String* @__quantum__rt__result_to_string(%Result* %result) + %30 = call %String* @__quantum__rt__string_concatenate(%String* %28, %String* %29) + call void @__quantum__rt__string_update_reference_count(%String* %28, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %29, i32 -1) + %31 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @13, i32 0, i32 0)) + %32 = call %String* @__quantum__rt__string_concatenate(%String* %30, %String* %31) + call void @__quantum__rt__string_update_reference_count(%String* %30, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %31, i32 -1) + %33 = call %String* @__quantum__rt__int_to_string(i64 %numIter) + %34 = call %String* @__quantum__rt__string_concatenate(%String* %32, %String* %33) + call void @__quantum__rt__string_update_reference_count(%String* %32, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %33, i32 -1) + %35 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @14, i32 0, i32 0)) + %36 = call %String* 
@__quantum__rt__string_concatenate(%String* %34, %String* %35) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %35, i32 -1) + call void @__quantum__rt__message(%String* %36) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + %37 = bitcast { i1, %Result*, i64 }* %18 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %37, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %36, i32 -1) + br label %continue__2 + +test1__1: ; preds = %body__1 + %38 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @6, i32 0, i32 0)) + %39 = call i1 @__quantum__rt__string_equal(%String* %gate, %String* %38) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + br i1 %39, label %then1__1, label %continue__2 + +then1__1: ; preds = %test1__1 + %40 = call { i1, %Result*, i64 }* @Microsoft__Quantum__Samples__RepeatUntilSuccess__CreateQubitsAndApplyRzArcTan2__body(i1 %inputValue, i2 %inputBasis, i64 %limit) + %41 = getelementptr inbounds { i1, %Result*, i64 }, { i1, %Result*, i64 }* %40, i32 0, i32 0 + %success__1 = load i1, i1* %41, align 1 + %42 = getelementptr inbounds { i1, %Result*, i64 }, { i1, %Result*, i64 }* %40, i32 0, i32 1 + %result__1 = load %Result*, %Result** %42, align 8 + %43 = getelementptr inbounds { i1, %Result*, i64 }, { i1, %Result*, i64 }* %40, i32 0, i32 2 + %numIter__1 = load i64, i64* %43, align 4 + %44 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @10, i32 0, i32 0)) + br i1 %success__1, label %condTrue__3, label %condFalse__2 + +condTrue__3: ; preds = %then1__1 + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @11, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__2: ; preds = %then1__1 + %46 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @12, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__2, %condTrue__3 + %47 = phi %String* [ %45, %condTrue__3 ], [ %46, %condFalse__2 ] + %48 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %47) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %47, i32 -1) + %49 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @13, i32 0, i32 0)) + %50 = call %String* @__quantum__rt__string_concatenate(%String* %48, %String* %49) + call void @__quantum__rt__string_update_reference_count(%String* %48, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %49, i32 -1) + %51 = call %String* @__quantum__rt__result_to_string(%Result* %result__1) + %52 = call %String* @__quantum__rt__string_concatenate(%String* %50, %String* %51) + call void @__quantum__rt__string_update_reference_count(%String* %50, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %51, i32 -1) + %53 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @13, i32 0, i32 0)) + %54 = call %String* @__quantum__rt__string_concatenate(%String* %52, %String* %53) + call void @__quantum__rt__string_update_reference_count(%String* %52, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %53, i32 -1) + %55 = call 
%String* @__quantum__rt__int_to_string(i64 %numIter__1) + %56 = call %String* @__quantum__rt__string_concatenate(%String* %54, %String* %55) + call void @__quantum__rt__string_update_reference_count(%String* %54, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %55, i32 -1) + %57 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @14, i32 0, i32 0)) + %58 = call %String* @__quantum__rt__string_concatenate(%String* %56, %String* %57) + call void @__quantum__rt__string_update_reference_count(%String* %56, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %57, i32 -1) + call void @__quantum__rt__message(%String* %58) + call void @__quantum__rt__result_update_reference_count(%Result* %result__1, i32 -1) + %59 = bitcast { i1, %Result*, i64 }* %40 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %59, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %58, i32 -1) + br label %continue__2 + +continue__2: ; preds = %condContinue__3, %test1__1, %condContinue__2 + br label %exiting__1 + +exiting__1: ; preds = %continue__2 + %60 = add i64 %n, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + br label %continue__1 +} + +declare i1 @__quantum__rt__string_equal(%String*, %String*) + +declare void @__quantum__rt__message(%String*) + +declare %String* @__quantum__rt__int_to_string(i64) + +define internal i64 @Microsoft__Quantum__Random__DrawRandomInt__body(i64 %min, i64 %max) { +entry: + %0 = call i64 @__quantum__qis__drawrandomint__body(i64 %min, i64 %max) + ret i64 %0 +} + +declare i64 @__quantum__qis__drawrandomint__body(i64, i64) + +define internal i2 @Microsoft__Quantum__Random__DrawRandomPauli__body() { +entry: + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 4) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to i2* + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 1) + %4 = bitcast i8* %3 to i2* + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 2) + %6 = bitcast i8* %5 to i2* + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 3) + %8 = bitcast i8* %7 to i2* + store i2 0, i2* %2, align 1 + store i2 1, i2* %4, align 1 + store i2 -1, i2* %6, align 1 + store i2 -2, i2* %8, align 1 + %9 = call i64 @__quantum__qis__drawrandomint__body(i64 0, i64 3) + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %9) + %11 = bitcast i8* %10 to i2* + %12 = load i2, i2* %11, align 1 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + ret i2 %12 +} + +declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__adj(%Qubit* %control, %Qubit* %target) { +entry: + call void @Microsoft__Quantum__Intrinsic__CNOT__body(%Qubit* %control, %Qubit* %target) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %4 = call i8* 
@__quantum__rt__array_get_element_ptr_1d(%Array* %3, i64 0) + %5 = bitcast i8* %4 to %Qubit** + store %Qubit* %control, %Qubit** %5, align 8 + %__controlQubits__1 = call %Array* @__quantum__rt__array_concatenate(%Array* %__controlQubits__, %Array* %3) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__1, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %3, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__1, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare %Array* @__quantum__rt__array_concatenate(%Array*, %Array*) + +define internal void @Microsoft__Quantum__Intrinsic__CNOT__ctladj(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 0 + %control = load %Qubit*, %Qubit** %1, align 8 + %2 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Qubit*, %Qubit* }* getelementptr ({ %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* null, i32 1) to i64)) + %4 = bitcast %Tuple* %3 to { %Qubit*, %Qubit* }* + %5 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 0 + %6 = getelementptr inbounds { %Qubit*, %Qubit* }, { %Qubit*, %Qubit* }* %4, i32 0, i32 1 + store %Qubit* %control, %Qubit** %5, align 8 + store %Qubit* %target, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Intrinsic__CNOT__ctl(%Array* %__controlQubits__, { %Qubit*, %Qubit* }* %4) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %3, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__h__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__H__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__h__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__h__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__H__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__h__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* 
%qubit) { +entry: + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 -2, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %3 = bitcast i8* %2 to %Qubit** + store %Qubit* %qubit, %Qubit** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %4 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + ret %Result* %4 +} + +define internal %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret %Result* %0 +} + +declare i64 @__quantum__rt__array_get_size_1d(%Array*) + +define internal void @Microsoft__Quantum__Intrinsic__S__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__s__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__s__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__S__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__s__adj(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__s__adj(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__S__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__s__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__s__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__S__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__s__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__s__ctladj(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__T__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__t__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__T__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__t__adj(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__T__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__t__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__t__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__T__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__t__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__t__ctladj(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Y__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__y__body(%Qubit* %qubit) + ret void +} + +declare void @__quantum__qis__y__body(%Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Y__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__y__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Y__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__y__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Y__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__z__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* 
%__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__z__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__Z__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__body(i2 %pauli, %Qubit* %target) { +entry: + %0 = icmp eq i2 %pauli, 1 + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +test1__1: ; preds = %entry + %1 = icmp eq i2 %pauli, -1 + br i1 %1, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__qis__y__body(%Qubit* %target) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %2 = icmp eq i2 %pauli, -2 + br i1 %2, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__qis__z__body(%Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__adj(i2 %pauli, %Qubit* %target) { +entry: + %0 = icmp eq i2 %pauli, 1 + br i1 %0, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +test1__1: ; preds = %entry + %1 = icmp eq i2 %pauli, -1 + br i1 %1, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__qis__y__body(%Qubit* %target) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %2 = icmp eq i2 %pauli, -2 + br i1 %2, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__qis__z__body(%Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__ctl(%Array* %__controlQubits__, { i2, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = icmp eq i2 %pauli, 1 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %4 = icmp eq i2 %pauli, -1 + br i1 %4, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %5 = icmp eq i2 %pauli, -2 + br i1 %5, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void 
@__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__ctladj(%Array* %__controlQubits__, { i2, %Qubit* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 0 + %pauli = load i2, i2* %1, align 1 + %2 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 1 + %target = load %Qubit*, %Qubit** %2, align 8 + %3 = icmp eq i2 %pauli, 1 + br i1 %3, label %then0__1, label %test1__1 + +then0__1: ; preds = %entry + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test1__1: ; preds = %entry + %4 = icmp eq i2 %pauli, -1 + br i1 %4, label %then1__1, label %test2__1 + +then1__1: ; preds = %test1__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__y__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +test2__1: ; preds = %test1__1 + %5 = icmp eq i2 %pauli, -2 + br i1 %5, label %then2__1, label %continue__1 + +then2__1: ; preds = %test2__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__z__ctl(%Array* %__controlQubits__, %Qubit* %target) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + br label %continue__1 + +continue__1: ; preds = %then2__1, %test2__1, %then1__1, %then0__1 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauli__body(%Array* %pauli, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyP__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___567b522d5a454dd698f7d8d488e6e7a2_Zipped__body(%Array* %pauli, %Array* %target) + call void @Microsoft__Quantum__Canon___4f5c61b64c80401cb80755aceb03bc25_ApplyToEachCA__body(%Callable* %0, %Array* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %5 = icmp sle i64 
%4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { i2, %Qubit* }** + %8 = load { i2, %Qubit* }*, { i2, %Qubit* }** %7, align 8 + %9 = bitcast { i2, %Qubit* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___4f5c61b64c80401cb80755aceb03bc25_ApplyToEachCA__body(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %register) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %2) + %5 = bitcast i8* %4 to { i2, %Qubit* }** + %6 = load { i2, %Qubit* }*, { i2, %Qubit* }** %5, align 8 + %7 = bitcast { i2, %Qubit* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %9 = call %Range @Microsoft__Quantum__Arrays___145e5135b2584be9b0848927ca7c70d6_IndexRange__body(%Array* %register) + %10 = extractvalue %Range %9, 0 + %11 = extractvalue %Range %9, 1 + %12 = extractvalue %Range %9, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %13 = icmp sgt i64 %11, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idxQubit = phi i64 [ %10, %preheader__1 ], [ %21, %exiting__2 ] + %14 = icmp sle i64 %idxQubit, %12 + %15 = icmp sge i64 %idxQubit, %12 + %16 = select i1 %13, i1 %14, i1 %15 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %18 = bitcast i8* %17 to { i2, %Qubit* }** + %19 = load { i2, %Qubit* }*, { i2, %Qubit* }** %18, align 8 + %20 = bitcast { i2, %Qubit* }* %19 to %Tuple* + call void @__quantum__rt__callable_invoke(%Callable* %singleElementOperation, %Tuple* %20, %Tuple* null) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %21 = add i64 %idxQubit, %11 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + %22 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %23 = phi i64 [ 0, %exit__2 ], [ %29, %exiting__3 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %23) + %26 = bitcast i8* %25 to { i2, %Qubit* }** + %27 = load { i2, %Qubit* }*, { 
i2, %Qubit* }** %26, align 8 + %28 = bitcast { i2, %Qubit* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %29 = add i64 %23, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__body__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i2, %Qubit* }* + %1 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 1 + %3 = load i2, i2* %1, align 1 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__body(i2 %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__adj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { i2, %Qubit* }* + %1 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %0, i32 0, i32 1 + %3 = load i2, i2* %1, align 1 + %4 = load %Qubit*, %Qubit** %2, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__adj(i2 %3, %Qubit* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__ctl__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i2, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i2, %Qubit* }*, { i2, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__ctl(%Array* %3, { i2, %Qubit* }* %4) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyP__ctladj__wrapper(%Tuple* %capture-tuple, %Tuple* %arg-tuple, %Tuple* %result-tuple) { +entry: + %0 = bitcast %Tuple* %arg-tuple to { %Array*, { i2, %Qubit* }* }* + %1 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %0, i32 0, i32 0 + %2 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %0, i32 0, i32 1 + %3 = load %Array*, %Array** %1, align 8 + %4 = load { i2, %Qubit* }*, { i2, %Qubit* }** %2, align 8 + call void @Microsoft__Quantum__Canon__ApplyP__ctladj(%Array* %3, { i2, %Qubit* }* %4) + ret void +} + +declare %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]*, [2 x void (%Tuple*, i32)*]*, %Tuple*) + +define internal %Array* @Microsoft__Quantum__Arrays___567b522d5a454dd698f7d8d488e6e7a2_Zipped__body(%Array* %left, %Array* %right) { +entry: + %output = alloca %Array*, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %left) + %1 = call i64 @__quantum__rt__array_get_size_1d(%Array* %right) + %2 = icmp slt i64 %0, %1 + br i1 %2, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %entry + br label %condContinue__1 + +condFalse__1: ; preds = %entry + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, 
%condTrue__1 + %nElements = phi i64 [ %0, %condTrue__1 ], [ %1, %condFalse__1 ] + %3 = icmp eq i64 %nElements, 0 + br i1 %3, label %then0__1, label %continue__1 + +then0__1: ; preds = %condContinue__1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 0) + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + ret %Array* %4 + +continue__1: ; preds = %condContinue__1 + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 0) + %6 = bitcast i8* %5 to i2* + %7 = load i2, i2* %6, align 1 + %8 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 0) + %9 = bitcast i8* %8 to %Qubit** + %10 = load %Qubit*, %Qubit** %9, align 8 + %11 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, %Qubit* }* getelementptr ({ i2, %Qubit* }, { i2, %Qubit* }* null, i32 1) to i64)) + %12 = bitcast %Tuple* %11 to { i2, %Qubit* }* + %13 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %12, i32 0, i32 0 + %14 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %12, i32 0, i32 1 + store i2 %7, i2* %13, align 1 + store %Qubit* %10, %Qubit** %14, align 8 + %15 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 %nElements) + %16 = sub i64 %nElements, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %continue__1 + %17 = phi i64 [ 0, %continue__1 ], [ %21, %exiting__1 ] + %18 = icmp sle i64 %17, %16 + br i1 %18, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %19 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %17) + %20 = bitcast i8* %19 to { i2, %Qubit* }** + store { i2, %Qubit* }* %12, { i2, %Qubit* }** %20, align 8 + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %21 = add i64 %17, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + store %Array* %15, %Array** %output, align 8 + %22 = sub i64 %nElements, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %23 = phi i64 [ 0, %exit__1 ], [ %29, %exiting__2 ] + %24 = icmp sle i64 %23, %22 + br i1 %24, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %25 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %15, i64 %23) + %26 = bitcast i8* %25 to { i2, %Qubit* }** + %27 = load { i2, %Qubit* }*, { i2, %Qubit* }** %26, align 8 + %28 = bitcast { i2, %Qubit* }* %27 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %28, i32 1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %23, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %15, i32 1) + %30 = sub i64 %nElements, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %idxElement = phi i64 [ 1, %exit__2 ], [ %48, %exiting__3 ] + %31 = icmp sle i64 %idxElement, %30 + br i1 %31, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %32 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %32, i32 -1) + %33 = call %Array* @__quantum__rt__array_copy(%Array* %32, i1 false) + %34 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %left, i64 %idxElement) + %35 = bitcast i8* %34 to i2* + %36 = load i2, i2* %35, align 1 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %right, i64 %idxElement) + %38 = bitcast i8* %37 to %Qubit** + %39 = load %Qubit*, 
%Qubit** %38, align 8 + %40 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ i2, %Qubit* }* getelementptr ({ i2, %Qubit* }, { i2, %Qubit* }* null, i32 1) to i64)) + %41 = bitcast %Tuple* %40 to { i2, %Qubit* }* + %42 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %41, i32 0, i32 0 + %43 = getelementptr inbounds { i2, %Qubit* }, { i2, %Qubit* }* %41, i32 0, i32 1 + store i2 %36, i2* %42, align 1 + store %Qubit* %39, %Qubit** %43, align 8 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %33, i64 %idxElement) + %45 = bitcast i8* %44 to { i2, %Qubit* }** + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 1) + %46 = load { i2, %Qubit* }*, { i2, %Qubit* }** %45, align 8 + %47 = bitcast { i2, %Qubit* }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %47, i32 -1) + store { i2, %Qubit* }* %41, { i2, %Qubit* }** %45, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %33, i32 1) + store %Array* %33, %Array** %output, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %32, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %idxElement, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + %49 = load %Array*, %Array** %output, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %left, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %right, i32 -1) + %50 = call i64 @__quantum__rt__array_get_size_1d(%Array* %49) + %51 = sub i64 %50, 1 + br label %header__4 + +header__4: ; preds = %exiting__4, %exit__3 + %52 = phi i64 [ 0, %exit__3 ], [ %58, %exiting__4 ] + %53 = icmp sle i64 %52, %51 + br i1 %53, label %body__4, label %exit__4 + +body__4: ; preds = %header__4 + %54 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %49, i64 %52) + %55 = bitcast i8* %54 to { i2, %Qubit* }** + %56 = load { i2, %Qubit* }*, { i2, %Qubit* }** %55, align 8 + %57 = bitcast { i2, %Qubit* }* %56 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %57, i32 -1) + br label %exiting__4 + +exiting__4: ; preds = %body__4 + %58 = add i64 %52, 1 + br label %header__4 + +exit__4: ; preds = %header__4 + call void @__quantum__rt__array_update_alias_count(%Array* %49, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %11, i32 -1) + ret %Array* %49 +} + +declare void @__quantum__rt__capture_update_reference_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_reference_count(%Callable*, i32) + +define internal void @Microsoft__Quantum__Canon__ApplyPauli__adj(%Array* %pauli, %Array* %target) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %0 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyP__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %1 = call %Array* @Microsoft__Quantum__Arrays___567b522d5a454dd698f7d8d488e6e7a2_Zipped__body(%Array* %pauli, %Array* %target) + call void @Microsoft__Quantum__Canon___4f5c61b64c80401cb80755aceb03bc25_ApplyToEachCA__adj(%Callable* %0, %Array* %1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void 
@__quantum__rt__capture_update_reference_count(%Callable* %0, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %0, i32 -1) + %2 = call i64 @__quantum__rt__array_get_size_1d(%Array* %1) + %3 = sub i64 %2, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %4 = phi i64 [ 0, %entry ], [ %10, %exiting__1 ] + %5 = icmp sle i64 %4, %3 + br i1 %5, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %6 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 %4) + %7 = bitcast i8* %6 to { i2, %Qubit* }** + %8 = load { i2, %Qubit* }*, { i2, %Qubit* }** %7, align 8 + %9 = bitcast { i2, %Qubit* }* %8 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %9, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %10 = add i64 %4, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___4f5c61b64c80401cb80755aceb03bc25_ApplyToEachCA__adj(%Callable* %singleElementOperation, %Array* %register) { +entry: + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %register) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %2) + %5 = bitcast i8* %4 to { i2, %Qubit* }** + %6 = load { i2, %Qubit* }*, { i2, %Qubit* }** %5, align 8 + %7 = bitcast { i2, %Qubit* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %9 = call %Range @Microsoft__Quantum__Arrays___145e5135b2584be9b0848927ca7c70d6_IndexRange__body(%Array* %register) + %10 = extractvalue %Range %9, 0 + %11 = extractvalue %Range %9, 1 + %12 = extractvalue %Range %9, 2 + %13 = sub i64 %12, %10 + %14 = sdiv i64 %13, %11 + %15 = mul i64 %11, %14 + %16 = add i64 %10, %15 + %17 = sub i64 0, %11 + %18 = insertvalue %Range zeroinitializer, i64 %16, 0 + %19 = insertvalue %Range %18, i64 %17, 1 + %20 = insertvalue %Range %19, i64 %10, 2 + %21 = extractvalue %Range %20, 0 + %22 = extractvalue %Range %20, 1 + %23 = extractvalue %Range %20, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %24 = icmp sgt i64 %22, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %21, %preheader__1 ], [ %33, %exiting__2 ] + %25 = icmp sle i64 %__qsVar0__idxQubit__, %23 + %26 = icmp sge i64 %__qsVar0__idxQubit__, %23 + %27 = select i1 %24, i1 %25, i1 %26 + br i1 %27, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %28 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %28) + %29 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 
%__qsVar0__idxQubit__) + %30 = bitcast i8* %29 to { i2, %Qubit* }** + %31 = load { i2, %Qubit* }*, { i2, %Qubit* }** %30, align 8 + %32 = bitcast { i2, %Qubit* }* %31 to %Tuple* + call void @__quantum__rt__callable_invoke(%Callable* %28, %Tuple* %32, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %28, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %28, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %33 = add i64 %__qsVar0__idxQubit__, %22 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + %34 = sub i64 %0, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %35 = phi i64 [ 0, %exit__2 ], [ %41, %exiting__3 ] + %36 = icmp sle i64 %35, %34 + br i1 %36, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %37 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %35) + %38 = bitcast i8* %37 to { i2, %Qubit* }** + %39 = load { i2, %Qubit* }*, { i2, %Qubit* }** %38, align 8 + %40 = bitcast { i2, %Qubit* }* %39 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %40, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %41 = add i64 %35, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauli__ctl(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %pauli = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 1) + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyP__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Array* @Microsoft__Quantum__Arrays___567b522d5a454dd698f7d8d488e6e7a2_Zipped__body(%Array* %pauli, %Array* %target) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + store %Callable* %3, %Callable** %7, align 8 + store %Array* %4, %Array** %8, align 8 + call void @Microsoft__Quantum__Canon___4f5c61b64c80401cb80755aceb03bc25_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %6) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void 
@__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + %9 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %10 = sub i64 %9, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %11 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %12 = icmp sle i64 %11, %10 + br i1 %12, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %11) + %14 = bitcast i8* %13 to { i2, %Qubit* }** + %15 = load { i2, %Qubit* }*, { i2, %Qubit* }** %14, align 8 + %16 = bitcast { i2, %Qubit* }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %11, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___4f5c61b64c80401cb80755aceb03bc25_ApplyToEachCA__ctl(%Array* %__controlQubits__, { %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %register) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %5) + %8 = bitcast i8* %7 to { i2, %Qubit* }** + %9 = load { i2, %Qubit* }*, { i2, %Qubit* }** %8, align 8 + %10 = bitcast { i2, %Qubit* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %12 = call %Range @Microsoft__Quantum__Arrays___145e5135b2584be9b0848927ca7c70d6_IndexRange__body(%Array* %register) + %13 = extractvalue %Range %12, 0 + %14 = extractvalue %Range %12, 1 + %15 = extractvalue %Range %12, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %16 = icmp sgt i64 %14, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %idxQubit = phi i64 [ %13, %preheader__1 ], [ %29, %exiting__2 ] + %17 = icmp sle i64 %idxQubit, %15 + %18 = icmp sge i64 %idxQubit, %15 + %19 = select i1 %16, i1 %17, i1 %18 + br i1 %19, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %20 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %20, i32 1) + call void @__quantum__rt__callable_make_controlled(%Callable* %20) + call void @__quantum__rt__array_update_reference_count(%Array* 
%__controlQubits__, i32 1) + %21 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %idxQubit) + %22 = bitcast i8* %21 to { i2, %Qubit* }** + %23 = load { i2, %Qubit* }*, { i2, %Qubit* }** %22, align 8 + %24 = bitcast { i2, %Qubit* }* %23 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 1) + %25 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i2, %Qubit* }* }* getelementptr ({ %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* null, i32 1) to i64)) + %26 = bitcast %Tuple* %25 to { %Array*, { i2, %Qubit* }* }* + %27 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %26, i32 0, i32 0 + %28 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %26, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %27, align 8 + store { i2, %Qubit* }* %23, { i2, %Qubit* }** %28, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %20, %Tuple* %25, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %20, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %20, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %24, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %25, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %29 = add i64 %idxQubit, %14 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + %30 = sub i64 %3, 1 + br label %header__3 + +header__3: ; preds = %exiting__3, %exit__2 + %31 = phi i64 [ 0, %exit__2 ], [ %37, %exiting__3 ] + %32 = icmp sle i64 %31, %30 + br i1 %32, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %33 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %31) + %34 = bitcast i8* %33 to { i2, %Qubit* }** + %35 = load { i2, %Qubit* }*, { i2, %Qubit* }** %34, align 8 + %36 = bitcast { i2, %Qubit* }* %35 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %36, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %37 = add i64 %31, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon__ApplyPauli__ctladj(%Array* %__controlQubits__, { %Array*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 0 + %pauli = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 1) + %2 = getelementptr inbounds { %Array*, %Array* }, { %Array*, %Array* }* %0, i32 0, i32 1 + %target = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 1) + %3 = call %Callable* @__quantum__rt__callable_create([4 x void (%Tuple*, %Tuple*, %Tuple*)*]* @Microsoft__Quantum__Canon__ApplyP__FunctionTable, [2 x void (%Tuple*, i32)*]* null, %Tuple* null) + %4 = call %Array* 
@Microsoft__Quantum__Arrays___567b522d5a454dd698f7d8d488e6e7a2_Zipped__body(%Array* %pauli, %Array* %target) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Callable*, %Array* }* getelementptr ({ %Callable*, %Array* }, { %Callable*, %Array* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Callable*, %Array* }* + %7 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 0 + %8 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %6, i32 0, i32 1 + store %Callable* %3, %Callable** %7, align 8 + store %Array* %4, %Array** %8, align 8 + call void @Microsoft__Quantum__Canon___4f5c61b64c80401cb80755aceb03bc25_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Array* }* %6) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %pauli, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %target, i32 -1) + call void @__quantum__rt__capture_update_reference_count(%Callable* %3, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %3, i32 -1) + %9 = call i64 @__quantum__rt__array_get_size_1d(%Array* %4) + %10 = sub i64 %9, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %11 = phi i64 [ 0, %entry ], [ %17, %exiting__1 ] + %12 = icmp sle i64 %11, %10 + br i1 %12, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %13 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 %11) + %14 = bitcast i8* %13 to { i2, %Qubit* }** + %15 = load { i2, %Qubit* }*, { i2, %Qubit* }** %14, align 8 + %16 = bitcast { i2, %Qubit* }* %15 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %16, i32 -1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %17 = add i64 %11, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Canon___4f5c61b64c80401cb80755aceb03bc25_ApplyToEachCA__ctladj(%Array* %__controlQubits__, { %Callable*, %Array* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 0 + %singleElementOperation = load %Callable*, %Callable** %1, align 8 + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 1) + %2 = getelementptr inbounds { %Callable*, %Array* }, { %Callable*, %Array* }* %0, i32 0, i32 1 + %register = load %Array*, %Array** %2, align 8 + %3 = call i64 @__quantum__rt__array_get_size_1d(%Array* %register) + %4 = sub i64 %3, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %11, %exiting__1 ] + %6 = icmp sle i64 %5, %4 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %5) + %8 = bitcast i8* %7 to { i2, %Qubit* }** + %9 = load { i2, %Qubit* }*, { i2, %Qubit* }** %8, align 8 + %10 = bitcast { i2, %Qubit* }* %9 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %10, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + 
%11 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 1) + %12 = call %Range @Microsoft__Quantum__Arrays___145e5135b2584be9b0848927ca7c70d6_IndexRange__body(%Array* %register) + %13 = extractvalue %Range %12, 0 + %14 = extractvalue %Range %12, 1 + %15 = extractvalue %Range %12, 2 + %16 = sub i64 %15, %13 + %17 = sdiv i64 %16, %14 + %18 = mul i64 %14, %17 + %19 = add i64 %13, %18 + %20 = sub i64 0, %14 + %21 = insertvalue %Range zeroinitializer, i64 %19, 0 + %22 = insertvalue %Range %21, i64 %20, 1 + %23 = insertvalue %Range %22, i64 %13, 2 + %24 = extractvalue %Range %23, 0 + %25 = extractvalue %Range %23, 1 + %26 = extractvalue %Range %23, 2 + br label %preheader__1 + +preheader__1: ; preds = %exit__1 + %27 = icmp sgt i64 %25, 0 + br label %header__2 + +header__2: ; preds = %exiting__2, %preheader__1 + %__qsVar0__idxQubit__ = phi i64 [ %24, %preheader__1 ], [ %40, %exiting__2 ] + %28 = icmp sle i64 %__qsVar0__idxQubit__, %26 + %29 = icmp sge i64 %__qsVar0__idxQubit__, %26 + %30 = select i1 %27, i1 %28, i1 %29 + br i1 %30, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %31 = call %Callable* @__quantum__rt__callable_copy(%Callable* %singleElementOperation, i1 false) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 1) + call void @__quantum__rt__callable_make_adjoint(%Callable* %31) + call void @__quantum__rt__callable_make_controlled(%Callable* %31) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 1) + %32 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %__qsVar0__idxQubit__) + %33 = bitcast i8* %32 to { i2, %Qubit* }** + %34 = load { i2, %Qubit* }*, { i2, %Qubit* }** %33, align 8 + %35 = bitcast { i2, %Qubit* }* %34 to %Tuple* + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 1) + %36 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, { i2, %Qubit* }* }* getelementptr ({ %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* null, i32 1) to i64)) + %37 = bitcast %Tuple* %36 to { %Array*, { i2, %Qubit* }* }* + %38 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %37, i32 0, i32 0 + %39 = getelementptr inbounds { %Array*, { i2, %Qubit* }* }, { %Array*, { i2, %Qubit* }* }* %37, i32 0, i32 1 + store %Array* %__controlQubits__, %Array** %38, align 8 + store { i2, %Qubit* }* %34, { i2, %Qubit* }** %39, align 8 + call void @__quantum__rt__callable_invoke(%Callable* %31, %Tuple* %36, %Tuple* null) + call void @__quantum__rt__capture_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__callable_update_reference_count(%Callable* %31, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %35, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %36, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %40 = add i64 %__qsVar0__idxQubit__, %25 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__capture_update_alias_count(%Callable* %singleElementOperation, i32 -1) + call void @__quantum__rt__callable_update_alias_count(%Callable* %singleElementOperation, i32 -1) + %41 = sub i64 %3, 1 + br label %header__3 + +header__3: ; preds 
= %exiting__3, %exit__2 + %42 = phi i64 [ 0, %exit__2 ], [ %48, %exiting__3 ] + %43 = icmp sle i64 %42, %41 + br i1 %43, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %44 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %register, i64 %42) + %45 = bitcast i8* %44 to { i2, %Qubit* }** + %46 = load { i2, %Qubit* }*, { i2, %Qubit* }** %45, align 8 + %47 = bitcast { i2, %Qubit* }* %46 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %47, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %body__3 + %48 = add i64 %42, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_alias_count(%Array* %register, i32 -1) + ret void +} + +declare void @__quantum__rt__capture_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__callable_update_alias_count(%Callable*, i32) + +declare void @__quantum__rt__tuple_update_alias_count(%Tuple*, i32) + +define internal %Range @Microsoft__Quantum__Arrays___145e5135b2584be9b0848927ca7c70d6_IndexRange__body(%Array* %array) { +entry: + %0 = call i64 @__quantum__rt__array_get_size_1d(%Array* %array) + %1 = sub i64 %0, 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %2 = phi i64 [ 0, %entry ], [ %8, %exiting__1 ] + %3 = icmp sle i64 %2, %1 + br i1 %3, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %4 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %2) + %5 = bitcast i8* %4 to { i2, %Qubit* }** + %6 = load { i2, %Qubit* }*, { i2, %Qubit* }** %5, align 8 + %7 = bitcast { i2, %Qubit* }* %6 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %7, i32 1) + br label %exiting__1 + +exiting__1: ; preds = %body__1 + %8 = add i64 %2, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 1) + %9 = sub i64 %0, 1 + %10 = insertvalue %Range { i64 0, i64 1, i64 0 }, i64 %9, 2 + %11 = sub i64 %0, 1 + br label %header__2 + +header__2: ; preds = %exiting__2, %exit__1 + %12 = phi i64 [ 0, %exit__1 ], [ %18, %exiting__2 ] + %13 = icmp sle i64 %12, %11 + br i1 %13, label %body__2, label %exit__2 + +body__2: ; preds = %header__2 + %14 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %array, i64 %12) + %15 = bitcast i8* %14 to { i2, %Qubit* }** + %16 = load { i2, %Qubit* }*, { i2, %Qubit* }** %15, align 8 + %17 = bitcast { i2, %Qubit* }* %16 to %Tuple* + call void @__quantum__rt__tuple_update_alias_count(%Tuple* %17, i32 -1) + br label %exiting__2 + +exiting__2: ; preds = %body__2 + %18 = add i64 %12, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_alias_count(%Array* %array, i32 -1) + ret %Range %10 +} + +declare void @__quantum__rt__callable_invoke(%Callable*, %Tuple*, %Tuple*) + +declare %Callable* @__quantum__rt__callable_copy(%Callable*, i1) + +declare void @__quantum__rt__callable_make_adjoint(%Callable*) + +declare void @__quantum__rt__callable_make_controlled(%Callable*) + +declare void @__quantum__qis__assertmeasurementprobability__body(%Array*, %Array*, %Result*, double, %String*, double) + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__adj(%Array* %bases, %Array* %qubits, %Result* %result, %String* %msg) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void 
@Microsoft__Quantum__Diagnostics__AssertMeasurement__body(%Array* %bases, %Array* %qubits, %Result* %result, %String* %msg) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctl(%Array* %controllingQubits, { %Array*, %Array*, %Result*, %String* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %controllingQubits, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 3 + %msg = load %String*, %String** %4, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %controllingQubits, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctladj(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %0, i32 0, i32 3 + %msg = load %String*, %String** %4, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 1) + %5 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, %String* }* getelementptr ({ %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* null, i32 1) to i64)) + %6 = bitcast %Tuple* %5 to { %Array*, %Array*, %Result*, %String* }* + %7 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 0 + %8 = 
getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 1 + %9 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 2 + %10 = getelementptr inbounds { %Array*, %Array*, %Result*, %String* }, { %Array*, %Array*, %Result*, %String* }* %6, i32 0, i32 3 + store %Array* %bases, %Array** %7, align 8 + store %Array* %qubits, %Array** %8, align 8 + store %Result* %result, %Result** %9, align 8 + store %String* %msg, %String** %10, align 8 + call void @Microsoft__Quantum__Diagnostics__AssertMeasurement__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, %String* }* %6) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %5, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__body(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__adj(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + call void @__quantum__qis__assertmeasurementprobability__body(%Array* %bases, %Array* %qubits, %Result* %result, double %prob, %String* %msg, double %tolerance) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret void +} + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void 
@__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 3 + %prob = load double, double* %4, align 8 + %5 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 4 + %msg = load %String*, %String** %5, align 8 + %6 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 5 + %tolerance = load double, double* %6, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, double, %String*, double }* getelementptr ({ %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array*, %Result*, double, %String*, double }* + %9 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 2 + %12 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 3 + %13 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 4 + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 5 + store %Array* %bases, %Array** %9, align 8 + store %Array* %qubits, %Array** %10, align 8 + store %Result* %result, %Result** %11, align 8 + store double %prob, double* %12, align 8 + store %String* %msg, %String** %13, align 8 + store double %tolerance, double* %14, align 8 + call void @__quantum__qis__assertmeasurementprobability__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %8) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void 
@__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +declare void @__quantum__qis__assertmeasurementprobability__ctl(%Array*, { %Array*, %Array*, %Result*, double, %String*, double }*) + +define internal void @Microsoft__Quantum__Diagnostics__AssertMeasurementProbability__ctladj(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %0) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + %1 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 0 + %bases = load %Array*, %Array** %1, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %2 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 1 + %qubits = load %Array*, %Array** %2, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %3 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 2 + %result = load %Result*, %Result** %3, align 8 + %4 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 3 + %prob = load double, double* %4, align 8 + %5 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 4 + %msg = load %String*, %String** %5, align 8 + %6 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %0, i32 0, i32 5 + %tolerance = load double, double* %6, align 8 + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 1) + %7 = call %Tuple* @__quantum__rt__tuple_create(i64 ptrtoint ({ %Array*, %Array*, %Result*, double, %String*, double }* getelementptr ({ %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* null, i32 1) to i64)) + %8 = bitcast %Tuple* %7 to { %Array*, %Array*, %Result*, double, %String*, double }* + %9 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 0 + %10 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 1 + %11 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 2 + %12 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 3 + %13 = getelementptr inbounds { %Array*, %Array*, %Result*, double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 4 + %14 = getelementptr inbounds { %Array*, %Array*, %Result*, 
double, %String*, double }, { %Array*, %Array*, %Result*, double, %String*, double }* %8, i32 0, i32 5 + store %Array* %bases, %Array** %9, align 8 + store %Array* %qubits, %Array** %10, align 8 + store %Result* %result, %Result** %11, align 8 + store double %prob, double* %12, align 8 + store %String* %msg, %String** %13, align 8 + store double %tolerance, double* %14, align 8 + call void @__quantum__qis__assertmeasurementprobability__ctl(%Array* %__controlQubits__, { %Array*, %Array*, %Result*, double, %String*, double }* %8) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__result_update_reference_count(%Result* %result, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %msg, i32 -1) + call void @__quantum__rt__tuple_update_reference_count(%Tuple* %7, i32 -1) + ret void +} + +declare %Array* @__quantum__rt__array_copy(%Array*, i1) + +define internal void @Microsoft__Quantum__Preparation__PrepareSingleQubitIdentity__body(%Qubit* %qubit) { +entry: + %0 = call i2 @Microsoft__Quantum__Random__DrawRandomPauli__body() + %1 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %1, i64 0) + %3 = bitcast i8* %2 to i2* + store i2 %0, i2* %3, align 1 + %4 = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %5 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %4, i64 0) + %6 = bitcast i8* %5 to %Qubit** + store %Qubit* %qubit, %Qubit** %6, align 8 + call void @Microsoft__Quantum__Canon__ApplyPauli__body(%Array* %1, %Array* %4) + call void @__quantum__rt__array_update_reference_count(%Array* %1, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %4, i32 -1) + ret void +} diff --git a/src/munchkin/tests/qsharp/repeat-until/repeat-until.csproj b/src/munchkin/tests/qsharp/repeat-until/repeat-until.csproj new file mode 100644 index 0000000..f930576 --- /dev/null +++ b/src/munchkin/tests/qsharp/repeat-until/repeat-until.csproj @@ -0,0 +1,22 @@ + + + + Library + net6.0 + true + Detailed + $(NETCoreSdkRuntimeIdentifier) + + + + + + + + + + Always + + + + diff --git a/src/munchkin/tests/qsharp/repeat-until/repeat_until.qs b/src/munchkin/tests/qsharp/repeat-until/repeat_until.qs new file mode 100644 index 0000000..a8e6d7b --- /dev/null +++ b/src/munchkin/tests/qsharp/repeat-until/repeat_until.qs @@ -0,0 +1,114 @@ +// Copyright (c) Microsoft Corporation. +// Licensed under the MIT License. +namespace Microsoft.Quantum.Samples.RepeatUntilSuccess { + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Math; + open Microsoft.Quantum.Preparation; + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.Measurement; + + /// # Summary + /// Example of a Repeat-until-success algorithm implementing a circuit + /// that achieves (I + i√2X)/√3 by Paetznick & Svore. This is the smallest + /// circuit found in the referenced work and described in figure 8. + /// # References + /// - [ *Adam Paetznick, Krysta M. 
Svore*,
+    ///   Quantum Information & Computation 14(15 & 16): 1277-1301 (2014)
+    ///   ](https://arxiv.org/abs/1311.1074)
+    /// For circuit diagram, see file SimpleRUS.png.
+    ///
+    /// # Input
+    /// ## inputBasis
+    /// Pauli basis in which to prepare input qubit
+    /// ## inputValue
+    /// Boolean value for input qubit (true maps to One, false maps to Zero)
+    /// ## limit
+    /// Integer limit to number of repeats of circuit
+    ///
+    /// # Remarks
+    /// The program executes a circuit on a "target" qubit using an "auxiliary"
+    /// qubit.
+    /// The goal is to measure Zero for the auxiliary qubit.
+    /// If this succeeds, the program will have effectively applied an
+    /// (I + i√2X)/√3 gate on the target qubit.
+    /// If this fails, the program reruns the circuit up to `limit` times.
+    operation CreateQubitsAndApplySimpleGate(
+        inputValue : Bool,
+        inputBasis : Pauli,
+        limit : Int
+    )
+    : ( Bool, Result, Int ) {
+        use register = Qubit[2];
+        let (success, numIter) = ApplySimpleGate(
+            inputBasis, inputValue, limit, register);
+        let result = Measure([inputBasis], [register[1]]);
+        return (success, result, numIter);
+    }
+
+    /// # Summary
+    /// Apply (I + i√2X)/√3 on qubits using a repeat-until-success algorithm.
+    ///
+    /// # Input
+    /// ## inputBasis
+    /// Pauli basis in which to prepare input qubit
+    /// ## inputValue
+    /// Boolean value for input qubit (true maps to One, false maps to Zero)
+    /// ## limit
+    /// Integer limit to number of repeats of circuit
+    /// ## register
+    /// Qubit register including auxiliary and target qubits
+    ///
+    /// # Output
+    /// Tuple of (success, numIter) where success = false if the number of
+    /// iterations (numIter) exceeds the input `limit`
+    operation ApplySimpleGate(
+        inputBasis : Pauli,
+        inputValue : Bool,
+        limit : Int,
+        register : Qubit[]
+    )
+    : (Bool, Int) {
+        // Initialize loop state: not yet done, not yet successful, zero iterations.
+        mutable done = false;
+        mutable success = false;
+        mutable numIter = 0;
+        // Prepare target qubit in |0⟩ or |1⟩ state, depending on input value
+        if (inputValue) {
+            X(register[1]);
+        }
+        PreparePauliEigenstate(inputBasis, register[1]);
+
+        repeat {
+            // Assert valid starting states for all qubits
+            AssertMeasurement([PauliZ], [register[0]], Zero,
+                "Auxiliary qubit is not in |0⟩ state.");
+            AssertQubitIsInState(register[1], inputBasis, inputValue);
+            ApplySimpleRUSCircuit(register);
+            set success = MResetZ(register[0]) == Zero;
+            set done = success or (numIter >= limit);
+            set numIter = numIter + 1;
+        }
+        until (done);
+        return (success, numIter);
+    }
+
+    /// # Summary
+    /// Apply RUS circuit on qubit register
+    ///
+    /// # Input
+    /// ## register
+    /// Qubit register including auxiliary and target qubits
+    operation ApplySimpleRUSCircuit(
+        register : Qubit[]
+    )
+    : Unit {
+        H(register[0]);
+        T(register[0]);
+        CNOT(register[0], register[1]);
+        H(register[0]);
+        CNOT(register[0], register[1]);
+        T(register[0]);
+        H(register[0]);
+    }
+}
\ No newline at end of file
diff --git a/src/munchkin/tests/qsharp/repeat-until/vgaterus.qs b/src/munchkin/tests/qsharp/repeat-until/vgaterus.qs
new file mode 100644
index 0000000..0f28f45
--- /dev/null
+++ b/src/munchkin/tests/qsharp/repeat-until/vgaterus.qs
@@ -0,0 +1,224 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
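+// Illustrative usage sketch (an assumption, not part of the sample): the
+// entry-point name RunSimpleRUS below is hypothetical; it simply shows how
+// CreateQubitsAndApplySimpleGate from repeat_until.qs might be driven, here
+// with a |1⟩ input in the PauliZ basis and at most 10 circuit repetitions.
+//
+//     @EntryPoint()
+//     operation RunSimpleRUS() : Unit {
+//         let (success, result, numIter) =
+//             CreateQubitsAndApplySimpleGate(true, PauliZ, 10);
+//         Message($"success={success}, result={result}, iterations={numIter}");
+//     }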
+namespace Microsoft.Quantum.Samples.RepeatUntilSuccess {
+    open Microsoft.Quantum.Intrinsic;
+    open Microsoft.Quantum.Canon;
+    open Microsoft.Quantum.Math;
+    open Microsoft.Quantum.Preparation;
+    open Microsoft.Quantum.Diagnostics;
+
+    /// # Summary
+    /// Example of a Repeat-until-success algorithm implementing a circuit
+    /// that achieves exp(i⋅ArcTan(2)⋅Z) by Paetznick & Svore.
+    /// The exp(𝑖 ArcTan(2) 𝑍) operation is also known as the "𝑉 gate."
+    /// # References
+    /// - [ *Adam Paetznick, Krysta M. Svore*,
+    ///   Quantum Information & Computation 14(15 & 16): 1277-1301 (2014)
+    ///   ](https://arxiv.org/abs/1311.1074)
+    /// For circuit diagram, see file RUS.png.
+    ///
+    /// # Input
+    /// ## inputBasis
+    /// Pauli basis in which to prepare input qubit
+    /// ## inputValue
+    /// Boolean value for input qubit (true maps to One, false maps to Zero)
+    /// ## limit
+    /// Integer limit to number of repeats of circuit
+    ///
+    /// # Remarks
+    /// The program executes a circuit on a "target" qubit using an "auxiliary"
+    /// and "resource" qubit. The circuit consists of two parts (red and blue
+    /// in image).
+    /// The goal is to measure Zero for both the auxiliary and resource qubit.
+    /// If this succeeds, the program will have effectively applied an
+    /// Rz(arctan(2)) gate (also known as V_3 gate) on the target qubit.
+    /// If this fails, the program reruns the circuit up to `limit` times.
+    operation CreateQubitsAndApplyRzArcTan2(
+        inputValue : Bool,
+        inputBasis : Pauli,
+        limit : Int
+    )
+    : (Bool, Result, Int) {
+        use auxiliary = Qubit();
+        use resource = Qubit();
+        use target = Qubit();
+
+        // Initialize qubits to starting values (|+⟩, |+⟩, |0⟩/|1⟩)
+        InitializeQubits(
+            inputBasis, inputValue, auxiliary, resource, target
+        );
+        let (success, numIter) = ApplyRzArcTan2(
+            inputBasis, inputValue, limit, auxiliary, resource, target);
+        let result = Measure([inputBasis], [target]);
+
+        // From version 0.12 it is no longer necessary to release qubits
+        // in the zero state.
+        ResetAll([target, resource, auxiliary]);
+        return (success, result, numIter);
+    }
+
+    /// # Summary
+    /// Apply Rz(arctan(2)) on qubits using a repeat-until-success algorithm.
+    ///
+    /// # Input
+    /// ## inputBasis
+    /// Pauli basis in which to prepare input qubit
+    /// ## inputValue
+    /// Boolean value for input qubit (true maps to One, false maps to Zero)
+    /// ## limit
+    /// Integer limit to number of repeats of circuit
+    /// ## auxiliary
+    /// Auxiliary qubit
+    /// ## resource
+    /// Resource qubit
+    /// ## target
+    /// Target qubit
+    ///
+    /// # Output
+    /// Tuple of (success, numIter) where success = false if the number of
+    /// iterations (numIter) exceeds the input `limit`
+    operation ApplyRzArcTan2(
+        inputBasis : Pauli,
+        inputValue : Bool,
+        limit : Int,
+        auxiliary : Qubit,
+        resource : Qubit,
+        target : Qubit
+    )
+    : (Bool, Int) {
+        // Initialize loop state: not yet done, not yet successful, zero iterations.
+        mutable done = false;
+        mutable success = false;
+        mutable numIter = 0;
+
+        repeat {
+            // Assert valid starting states for all qubits
+            AssertMeasurement([PauliX], [auxiliary], Zero,
+                "Auxiliary qubit is not in |+⟩ state.");
+            AssertMeasurement([PauliX], [resource], Zero,
+                "Resource qubit is not in |+⟩ state.");
+            AssertQubitIsInState(target, inputBasis, inputValue);
+
+            // Run Part 1 of the program.
+            let result1 = ApplyAndMeasurePart1(auxiliary, resource);
+            // We'll only run Part 2 if Part 1 returns Zero.
+            // Otherwise, we'll skip and rerun Part 1 again.
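+            // Outcome handling, as descriptive comments on the branches below:
+            //   result1 == Zero and result2 == Zero : success, the V gate has
+            //                                         been applied to target.
+            //   result1 == Zero and result2 == One  : undo the effective Z
+            //                                         rotation on target and
+            //                                         reset resource to |+⟩.
+            //   result1 == One                      : Part 2 never ran and
+            //                                         target is untouched, so
+            //                                         only auxiliary and
+            //                                         resource need restoring.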
+            if (result1 == Zero) { //|0+⟩
+                let result2 = ApplyAndMeasurePart2(resource, target);
+                if (result2 == Zero) { //|00⟩
+                    set success = true;
+                } else { //|01⟩
+                    Z(resource); // Reset resource from |-⟩ to |+⟩
+                    Adjoint Z(target); // Correct effective Z rotation on target
+                }
+            } else { //|1+⟩
+                // Set auxiliary and resource qubit back to |+⟩
+                Z(auxiliary);
+                Reset(resource);
+                H(resource);
+            }
+            set done = success or (numIter >= limit);
+            set numIter = numIter + 1;
+        }
+        until (done);
+        return (success, numIter);
+    }
+
+    /// # Summary
+    /// Initialize auxiliary and resource qubits in |+⟩, target in |0⟩ or |1⟩.
+    ///
+    /// # Input
+    /// ## inputBasis
+    /// Pauli basis in which to prepare input qubit
+    /// ## inputValue
+    /// Boolean value for input qubit (true maps to One, false maps to Zero)
+    /// ## auxiliary
+    /// Auxiliary qubit
+    /// ## resource
+    /// Resource qubit
+    /// ## target
+    /// Target qubit
+    operation InitializeQubits(
+        inputBasis : Pauli,
+        inputValue : Bool,
+        auxiliary : Qubit,
+        resource : Qubit,
+        target : Qubit
+    )
+    : Unit {
+        // Prepare auxiliary and resource qubits in |+⟩ state
+        H(auxiliary);
+        H(resource);
+
+        // Prepare target qubit in |0⟩ or |1⟩ state, depending on input value
+        if (inputValue) {
+            X(target);
+        }
+        PreparePauliEigenstate(inputBasis, target);
+    }
+
+    /// # Summary
+    /// Apply part 1 of RUS circuit (red circuit shown in README) and measure
+    /// auxiliary qubit in Pauli X basis
+    ///
+    /// # Input
+    /// ## auxiliary
+    /// Auxiliary qubit
+    /// ## resource
+    /// Resource qubit
+    operation ApplyAndMeasurePart1(
+        auxiliary : Qubit,
+        resource : Qubit
+    )
+    : Result {
+        within {
+            T(auxiliary);
+        } apply {
+            CNOT(resource, auxiliary);
+        }
+
+        return Measure([PauliX], [auxiliary]);
+    }
+
+    /// # Summary
+    /// Apply part 2 of RUS circuit (blue circuit shown in README) and measure
+    /// resource qubit in Pauli X basis
+    ///
+    /// # Input
+    /// ## resource
+    /// Resource qubit
+    /// ## target
+    /// Target qubit
+    operation ApplyAndMeasurePart2(resource : Qubit, target : Qubit) : Result {
+        T(target);
+        Z(target);
+        CNOT(target, resource);
+        T(resource);
+
+        return Measure([PauliX], [resource]);
+    }
+
+    /// # Summary
+    /// Assert target qubit state is the desired input value in the desired
+    /// input basis.
+    ///
+    /// # Input
+    /// ## target
+    /// Target qubit
+    /// ## inputBasis
+    /// Pauli basis in which to prepare input qubit
+    /// ## inputValue
+    /// Boolean value for input qubit (true maps to One, false maps to Zero)
+    operation AssertQubitIsInState(
+        target : Qubit,
+        inputBasis : Pauli,
+        inputValue : Bool
+    )
+    : Unit {
+        AssertMeasurement(
+            [inputBasis], [target], inputValue ? One | Zero,
+            $"Qubit is not in {inputValue ? One | Zero} state for given input basis."
+        );
+    }
+}
\ No newline at end of file
diff --git a/src/munchkin/tests/qsharp/simplified-oracle-generator/Library.qs b/src/munchkin/tests/qsharp/simplified-oracle-generator/Library.qs
new file mode 100644
index 0000000..f7b0047
--- /dev/null
+++ b/src/munchkin/tests/qsharp/simplified-oracle-generator/Library.qs
@@ -0,0 +1,29 @@
+// Copyright (c) Microsoft Corporation.
+// Licensed under the MIT License.
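+// The nested loops in RunProgram below enumerate all eight (ca, cb, cc) input
+// assignments and print a classical truth table for the oracle qubit f. As an
+// illustrative expansion (an assumption for exposition, taking
+// (ca, cb, cc) = (true, false, true)), one iteration's within/apply block is
+// equivalent to:
+//
+//     X(a); X(c);                               // 'within': set selected inputs
+//     let result = IsResultOne(MResetZ(f));     // 'apply': measure the oracle
+//     Message($"{cc} {cb} {ca} -> {result}");
+//     X(c); X(a);                               // adjoint of 'within' restores |000⟩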
+namespace Microsoft.Quantum.OracleGenerator { + open Microsoft.Quantum.Canon; + open Microsoft.Quantum.Diagnostics; + open Microsoft.Quantum.Intrinsic; + open Microsoft.Quantum.Measurement; + + @EntryPoint() + operation RunProgram() : Unit { + use (a, b, c) = (Qubit(), Qubit(), Qubit()); + use f = Qubit(); + + for ca in [false, true] { + for cb in [false, true] { + for cc in [false, true] { + within { + if ca { X(a); } + if cb { X(b); } + if cc { X(c); } + } apply { + let result = IsResultOne(MResetZ(f)); + Message($"{cc} {cb} {ca} -> {result}"); + } + } + } + } + } +} \ No newline at end of file diff --git a/src/munchkin/tests/qsharp/simplified-oracle-generator/libLLVM.dll b/src/munchkin/tests/qsharp/simplified-oracle-generator/libLLVM.dll new file mode 100644 index 0000000..e10836a Binary files /dev/null and b/src/munchkin/tests/qsharp/simplified-oracle-generator/libLLVM.dll differ diff --git a/src/munchkin/tests/qsharp/simplified-oracle-generator/qir/simplified-oracle-generator.ll b/src/munchkin/tests/qsharp/simplified-oracle-generator/qir/simplified-oracle-generator.ll new file mode 100644 index 0000000..2b25a53 --- /dev/null +++ b/src/munchkin/tests/qsharp/simplified-oracle-generator/qir/simplified-oracle-generator.ll @@ -0,0 +1,350 @@ + +%Qubit = type opaque +%Array = type opaque +%Result = type opaque +%String = type opaque + +@0 = internal constant [5 x i8] c"true\00" +@1 = internal constant [6 x i8] c"false\00" +@2 = internal constant [2 x i8] c" \00" +@3 = internal constant [5 x i8] c" -> \00" +@4 = internal constant [3 x i8] c"()\00" + +define internal void @Microsoft__Quantum__OracleGenerator__RunProgram__body() { +entry: + %a = call %Qubit* @__quantum__rt__qubit_allocate() + %b = call %Qubit* @__quantum__rt__qubit_allocate() + %c = call %Qubit* @__quantum__rt__qubit_allocate() + %f = call %Qubit* @__quantum__rt__qubit_allocate() + %0 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %1 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 0) + %2 = bitcast i8* %1 to i1* + %3 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 1) + %4 = bitcast i8* %3 to i1* + store i1 false, i1* %2, align 1 + store i1 true, i1* %4, align 1 + br label %header__1 + +header__1: ; preds = %exiting__1, %entry + %5 = phi i64 [ 0, %entry ], [ %14, %exiting__1 ] + %6 = icmp sle i64 %5, 1 + br i1 %6, label %body__1, label %exit__1 + +body__1: ; preds = %header__1 + %7 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %0, i64 %5) + %8 = bitcast i8* %7 to i1* + %ca = load i1, i1* %8, align 1 + %9 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %10 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %9, i64 0) + %11 = bitcast i8* %10 to i1* + %12 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %9, i64 1) + %13 = bitcast i8* %12 to i1* + store i1 false, i1* %11, align 1 + store i1 true, i1* %13, align 1 + br label %header__2 + +exiting__1: ; preds = %exit__2 + %14 = add i64 %5, 1 + br label %header__1 + +exit__1: ; preds = %header__1 + call void @__quantum__rt__array_update_reference_count(%Array* %0, i32 -1) + call void @__quantum__rt__qubit_release(%Qubit* %f) + call void @__quantum__rt__qubit_release(%Qubit* %a) + call void @__quantum__rt__qubit_release(%Qubit* %b) + call void @__quantum__rt__qubit_release(%Qubit* %c) + ret void + +header__2: ; preds = %exiting__2, %body__1 + %15 = phi i64 [ 0, %body__1 ], [ %24, %exiting__2 ] + %16 = icmp sle i64 %15, 1 + br i1 %16, label %body__2, label %exit__2 + +body__2: ; 
preds = %header__2 + %17 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %9, i64 %15) + %18 = bitcast i8* %17 to i1* + %cb = load i1, i1* %18, align 1 + %19 = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 2) + %20 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 0) + %21 = bitcast i8* %20 to i1* + %22 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 1) + %23 = bitcast i8* %22 to i1* + store i1 false, i1* %21, align 1 + store i1 true, i1* %23, align 1 + br label %header__3 + +exiting__2: ; preds = %exit__3 + %24 = add i64 %15, 1 + br label %header__2 + +exit__2: ; preds = %header__2 + call void @__quantum__rt__array_update_reference_count(%Array* %9, i32 -1) + br label %exiting__1 + +header__3: ; preds = %exiting__3, %body__2 + %25 = phi i64 [ 0, %body__2 ], [ %51, %exiting__3 ] + %26 = icmp sle i64 %25, 1 + br i1 %26, label %body__3, label %exit__3 + +body__3: ; preds = %header__3 + %27 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %19, i64 %25) + %28 = bitcast i8* %27 to i1* + %cc = load i1, i1* %28, align 1 + br i1 %ca, label %then0__1, label %continue__1 + +then0__1: ; preds = %body__3 + call void @__quantum__qis__x__body(%Qubit* %a) + br label %continue__1 + +continue__1: ; preds = %then0__1, %body__3 + br i1 %cb, label %then0__2, label %continue__2 + +then0__2: ; preds = %continue__1 + call void @__quantum__qis__x__body(%Qubit* %b) + br label %continue__2 + +continue__2: ; preds = %then0__2, %continue__1 + br i1 %cc, label %then0__3, label %continue__3 + +then0__3: ; preds = %continue__2 + call void @__quantum__qis__x__body(%Qubit* %c) + br label %continue__3 + +continue__3: ; preds = %then0__3, %continue__2 + %29 = call %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %f) + %__qsVar0__result__ = call i1 @Microsoft__Quantum__Canon__IsResultOne__body(%Result* %29) + br i1 %cc, label %condTrue__1, label %condFalse__1 + +condTrue__1: ; preds = %continue__3 + %30 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @0, i32 0, i32 0)) + br label %condContinue__1 + +condFalse__1: ; preds = %continue__3 + %31 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @1, i32 0, i32 0)) + br label %condContinue__1 + +condContinue__1: ; preds = %condFalse__1, %condTrue__1 + %32 = phi %String* [ %30, %condTrue__1 ], [ %31, %condFalse__1 ] + %33 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @2, i32 0, i32 0)) + %34 = call %String* @__quantum__rt__string_concatenate(%String* %32, %String* %33) + call void @__quantum__rt__string_update_reference_count(%String* %32, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %33, i32 -1) + br i1 %cb, label %condTrue__2, label %condFalse__2 + +condTrue__2: ; preds = %condContinue__1 + %35 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @0, i32 0, i32 0)) + br label %condContinue__2 + +condFalse__2: ; preds = %condContinue__1 + %36 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @1, i32 0, i32 0)) + br label %condContinue__2 + +condContinue__2: ; preds = %condFalse__2, %condTrue__2 + %37 = phi %String* [ %35, %condTrue__2 ], [ %36, %condFalse__2 ] + %38 = call %String* @__quantum__rt__string_concatenate(%String* %34, %String* %37) + call void @__quantum__rt__string_update_reference_count(%String* %34, i32 -1) + call void 
@__quantum__rt__string_update_reference_count(%String* %37, i32 -1) + %39 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([2 x i8], [2 x i8]* @2, i32 0, i32 0)) + %40 = call %String* @__quantum__rt__string_concatenate(%String* %38, %String* %39) + call void @__quantum__rt__string_update_reference_count(%String* %38, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %39, i32 -1) + br i1 %ca, label %condTrue__3, label %condFalse__3 + +condTrue__3: ; preds = %condContinue__2 + %41 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @0, i32 0, i32 0)) + br label %condContinue__3 + +condFalse__3: ; preds = %condContinue__2 + %42 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @1, i32 0, i32 0)) + br label %condContinue__3 + +condContinue__3: ; preds = %condFalse__3, %condTrue__3 + %43 = phi %String* [ %41, %condTrue__3 ], [ %42, %condFalse__3 ] + %44 = call %String* @__quantum__rt__string_concatenate(%String* %40, %String* %43) + call void @__quantum__rt__string_update_reference_count(%String* %40, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %43, i32 -1) + %45 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @3, i32 0, i32 0)) + %46 = call %String* @__quantum__rt__string_concatenate(%String* %44, %String* %45) + call void @__quantum__rt__string_update_reference_count(%String* %44, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %45, i32 -1) + br i1 %__qsVar0__result__, label %condTrue__4, label %condFalse__4 + +condTrue__4: ; preds = %condContinue__3 + %47 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([5 x i8], [5 x i8]* @0, i32 0, i32 0)) + br label %condContinue__4 + +condFalse__4: ; preds = %condContinue__3 + %48 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([6 x i8], [6 x i8]* @1, i32 0, i32 0)) + br label %condContinue__4 + +condContinue__4: ; preds = %condFalse__4, %condTrue__4 + %49 = phi %String* [ %47, %condTrue__4 ], [ %48, %condFalse__4 ] + %50 = call %String* @__quantum__rt__string_concatenate(%String* %46, %String* %49) + call void @__quantum__rt__string_update_reference_count(%String* %46, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %49, i32 -1) + call void @__quantum__rt__message(%String* %50) + br i1 %cc, label %then0__4, label %continue__4 + +then0__4: ; preds = %condContinue__4 + call void @__quantum__qis__x__body(%Qubit* %c) + br label %continue__4 + +continue__4: ; preds = %then0__4, %condContinue__4 + br i1 %cb, label %then0__5, label %continue__5 + +then0__5: ; preds = %continue__4 + call void @__quantum__qis__x__body(%Qubit* %b) + br label %continue__5 + +continue__5: ; preds = %then0__5, %continue__4 + br i1 %ca, label %then0__6, label %continue__6 + +then0__6: ; preds = %continue__5 + call void @__quantum__qis__x__body(%Qubit* %a) + br label %continue__6 + +continue__6: ; preds = %then0__6, %continue__5 + call void @__quantum__rt__result_update_reference_count(%Result* %29, i32 -1) + call void @__quantum__rt__string_update_reference_count(%String* %50, i32 -1) + br label %exiting__3 + +exiting__3: ; preds = %continue__6 + %51 = add i64 %25, 1 + br label %header__3 + +exit__3: ; preds = %header__3 + call void @__quantum__rt__array_update_reference_count(%Array* %19, i32 -1) + br label %exiting__2 +} + +declare %Qubit* 
@__quantum__rt__qubit_allocate() + +declare %Array* @__quantum__rt__qubit_allocate_array(i64) + +declare void @__quantum__rt__qubit_release(%Qubit*) + +declare %Array* @__quantum__rt__array_create_1d(i32, i64) + +declare i8* @__quantum__rt__array_get_element_ptr_1d(%Array*, i64) + +declare void @__quantum__qis__x__body(%Qubit*) + +define internal i1 @Microsoft__Quantum__Canon__IsResultOne__body(%Result* %input) { +entry: + %0 = call %Result* @__quantum__rt__result_get_one() + %1 = call i1 @__quantum__rt__result_equal(%Result* %input, %Result* %0) + ret i1 %1 +} + +define internal %Result* @Microsoft__Quantum__Measurement__MResetZ__body(%Qubit* %target) { +entry: + %result = call %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %target) + %0 = call %Result* @__quantum__rt__result_get_one() + %1 = call i1 @__quantum__rt__result_equal(%Result* %result, %Result* %0) + br i1 %1, label %then0__1, label %continue__1 + +then0__1: ; preds = %entry + call void @__quantum__qis__x__body(%Qubit* %target) + br label %continue__1 + +continue__1: ; preds = %then0__1, %entry + ret %Result* %result +} + +declare %String* @__quantum__rt__string_create(i8*) + +declare void @__quantum__rt__string_update_reference_count(%String*, i32) + +declare %String* @__quantum__rt__string_concatenate(%String*, %String*) + +declare void @__quantum__rt__message(%String*) + +declare void @__quantum__rt__result_update_reference_count(%Result*, i32) + +declare void @__quantum__rt__array_update_reference_count(%Array*, i32) + +declare %Result* @__quantum__rt__result_get_one() + +declare i1 @__quantum__rt__result_equal(%Result*, %Result*) + +define internal %Result* @Microsoft__Quantum__Intrinsic__M__body(%Qubit* %qubit) { +entry: + %bases = call %Array* @__quantum__rt__array_create_1d(i32 1, i64 1) + %0 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %bases, i64 0) + %1 = bitcast i8* %0 to i2* + store i2 -2, i2* %1, align 1 + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + %qubits = call %Array* @__quantum__rt__array_create_1d(i32 8, i64 1) + %2 = call i8* @__quantum__rt__array_get_element_ptr_1d(%Array* %qubits, i64 0) + %3 = bitcast i8* %2 to %Qubit** + store %Qubit* %qubit, %Qubit** %3, align 8 + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %4 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_reference_count(%Array* %qubits, i32 -1) + ret %Result* %4 +} + +declare void @__quantum__rt__array_update_alias_count(%Array*, i32) + +declare %Result* @__quantum__qis__measure__body(%Array*, %Array*) + +define internal %Result* @Microsoft__Quantum__Intrinsic__Measure__body(%Array* %bases, %Array* %qubits) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 1) + %0 = call %Result* @__quantum__qis__measure__body(%Array* %bases, %Array* %qubits) + call void @__quantum__rt__array_update_alias_count(%Array* %bases, i32 -1) + call void @__quantum__rt__array_update_alias_count(%Array* %qubits, i32 -1) + ret %Result* %0 +} + +define internal void @Microsoft__Quantum__Intrinsic__X__body(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* 
%qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__adj(%Qubit* %qubit) { +entry: + call void @__quantum__qis__x__body(%Qubit* %qubit) + ret void +} + +define internal void @Microsoft__Quantum__Intrinsic__X__ctl(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +declare void @__quantum__qis__x__ctl(%Array*, %Qubit*) + +define internal void @Microsoft__Quantum__Intrinsic__X__ctladj(%Array* %__controlQubits__, %Qubit* %qubit) { +entry: + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 1) + call void @__quantum__qis__x__ctl(%Array* %__controlQubits__, %Qubit* %qubit) + call void @__quantum__rt__array_update_alias_count(%Array* %__controlQubits__, i32 -1) + ret void +} + +define void @Microsoft__Quantum__OracleGenerator__RunProgram__Interop() #0 { +entry: + call void @Microsoft__Quantum__OracleGenerator__RunProgram__body() + ret void +} + +define void @Microsoft__Quantum__OracleGenerator__RunProgram() #1 { +entry: + call void @Microsoft__Quantum__OracleGenerator__RunProgram__body() + %0 = call %String* @__quantum__rt__string_create(i8* getelementptr inbounds ([3 x i8], [3 x i8]* @4, i32 0, i32 0)) + call void @__quantum__rt__message(%String* %0) + call void @__quantum__rt__string_update_reference_count(%String* %0, i32 -1) + ret void +} + +attributes #0 = { "InteropFriendly" } +attributes #1 = { "EntryPoint" } diff --git a/src/munchkin/tests/qsharp/simplified-oracle-generator/simplified-oracle-generator.csproj b/src/munchkin/tests/qsharp/simplified-oracle-generator/simplified-oracle-generator.csproj new file mode 100644 index 0000000..32693fd --- /dev/null +++ b/src/munchkin/tests/qsharp/simplified-oracle-generator/simplified-oracle-generator.csproj @@ -0,0 +1,17 @@ + + + + Exe + net6.0 + true + Detailed + $(NETCoreSdkRuntimeIdentifier) + + + + + Always + + + + diff --git a/src/munchkin/tests/rust_python_integration.py b/src/munchkin/tests/rust_python_integration.py new file mode 100644 index 0000000..20da374 --- /dev/null +++ b/src/munchkin/tests/rust_python_integration.py @@ -0,0 +1,49 @@ +from typing import Dict + + +def build_args(): + return [1, "Dave", 2.5] + + +def build_invalid_args(): + return [None, object(), BuilderAdaptor()] + + +class BuilderAdaptor: + def __init__(self): + self.gates = [] + + def cx(self, controls, target, radii): + self.gates.append(f"cx {controls} {target} {radii}") + + def cz(self, controls, target, radii): + self.gates.append(f"cz {controls} {target} {radii}") + + def cy(self, controls, target, radii): + self.gates.append(f"cy {controls} {target} {radii}") + + def x(self, qubit, radii): + self.gates.append(f"x {qubit} {radii}") + + def y(self, qubit, radii): + self.gates.append(f"y {qubit} {radii}") + + def z(self, qubit, radii): + self.gates.append(f"z {qubit} {radii}") + + def swap(self, qubit1, qubit2): + self.gates.append(f"swap {qubit1} {qubit2}") + + def reset(self, qubit): + self.gates.append(f"reset {qubit}") + + def measure(self, qubit, register): + self.gates.append(f"measure {qubit} {register}") + + def clear(self): + self.gates.append("clear") + + +class RuntimeAdaptor: + def execute(self, _: BuilderAdaptor) -> Dict[str, int]: + return {"00": 250, "01": 250, "10": 250, "11": 251} diff --git 
a/src/munchkin/tests/test_munchkin.py b/src/munchkin/tests/test_munchkin.py new file mode 100644 index 0000000..fe7893e --- /dev/null +++ b/src/munchkin/tests/test_munchkin.py @@ -0,0 +1,143 @@ +from os.path import abspath, dirname, join + +from .file_utils import get_qir_path +from pykin.simulators import fetch_qasm_runtime +from pykin.runtime import ( + BuilderAdaptor, + RuntimeAdaptor, MunchkinRuntime, +) + +def fetch_project_ll(proj_name: str): + """Return the path to a Munchkin test file for processing via the Python APIs.""" + return abspath( + join( + dirname(__file__), + "qsharp", + proj_name, + "qir", + f"{proj_name}.ll", + ) + ) + +class BuilderMock(BuilderAdaptor): + def __init__(self): + self.gates = [] + + def cx(self, controls, target, radii): + self.gates.append(f"cx {controls} {target} {radii}") + + def cz(self, controls, target, radii): + self.gates.append(f"cz {controls} {target} {radii}") + + def cy(self, controls, target, radii): + self.gates.append(f"cy {controls} {target} {radii}") + + def x(self, qubit, radii): + self.gates.append(f"x {qubit} {radii}") + + def y(self, qubit, radii): + self.gates.append(f"y {qubit} {radii}") + + def z(self, qubit, radii): + self.gates.append(f"z {qubit} {radii}") + + def swap(self, qubit1, qubit2): + self.gates.append(f"swap {qubit1} {qubit2}") + + def reset(self, qubit): + self.gates.append(f"reset {qubit}") + + def measure(self, qubit): + self.gates.append(f"measure {qubit}") + + def clear(self): + if any(self.gates): + self.gates.append("clear") + + +class RuntimeMock(RuntimeAdaptor): + def __init__(self): + self.executed = [] + + def execute(self, builder: BuilderMock): + self.executed = builder.gates + return dict() + + +def fetch_mock_runtime(): + return MunchkinRuntime(BuilderMock(), RuntimeMock()) + +class TestMunchkin: + def test_qaoa(self): + qir = fetch_project_ll("qaoa") + runtime = fetch_qasm_runtime(20) + results = runtime.run(qir) + + # This program prints its results rather than returning them.
+ assert results is None + + def test_oracle_gen(self): + qir = fetch_project_ll("oracle-generator") + runtime = fetch_qasm_runtime(20) + results = runtime.run(qir) + + assert results is None + + def test_minified_generator(self): + qir = fetch_project_ll("minified-oracle-generator") + runtime = fetch_qasm_runtime(20) + results = runtime.run(qir, [True]) + + assert results is None + + def test_parser_bell_psi_plus(self): + runtime = fetch_mock_runtime() + runtime.run(get_qir_path("bell_psi_plus.ll")) + + assert runtime.builder.gates == [ + "z 0 3.141592653589793", + "y 0 1.5707963267948966", + "cx [0] 1 3.141592653589793", + "measure 0", + "measure 1", + ] + + def test_parser_bell_psi_minus(self): + runtime = fetch_mock_runtime() + runtime.run(get_qir_path("bell_psi_minus.ll")) + + assert runtime.builder.gates == [ + "x 0 3.141592653589793", + "z 0 3.141592653589793", + "y 0 1.5707963267948966", + "cx [0] 1 3.141592653589793", + "measure 0", + "measure 1", + ] + + def test_parser_bell_theta_plus(self): + runtime = fetch_mock_runtime() + runtime.run(get_qir_path("bell_theta_plus.ll")) + + assert runtime.builder.gates == [ + "x 1 3.141592653589793", + "z 0 3.141592653589793", + "y 0 1.5707963267948966", + "cx [0] 1 3.141592653589793", + "measure 0", + "measure 1", + ] + + def test_parser_bell_theta_minus(self): + runtime = fetch_mock_runtime() + runtime.run(get_qir_path("bell_theta_minus.ll")) + + assert runtime.builder.gates == [ + "x 1 3.141592653589793", + "x 0 3.141592653589793", + "z 0 3.141592653589793", + "y 0 1.5707963267948966", + "cx [0] 1 3.141592653589793", + "measure 0", + "measure 1", + ]
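All four parser tests above follow the same record-and-compare pattern: the mock builder turns every gate call into a plain string, and each assertion compares the recorded stream against the expected lowering of the QIR file. For illustration, here is a self-contained sketch of that pattern driven by hand instead of by a parsed .ll file (the class re-states a subset of the BuilderMock above rather than importing it):

```python
import math

class BuilderMock:
    """Records each gate call as a plain string, like the test mock above."""
    def __init__(self):
        self.gates = []

    def z(self, qubit, radii):
        self.gates.append(f"z {qubit} {radii}")

    def y(self, qubit, radii):
        self.gates.append(f"y {qubit} {radii}")

    def cx(self, controls, target, radii):
        self.gates.append(f"cx {controls} {target} {radii}")

    def measure(self, qubit):
        self.gates.append(f"measure {qubit}")

# Hand-drive the same gate sequence the bell_psi_plus file parses to.
builder = BuilderMock()
builder.z(0, math.pi)
builder.y(0, math.pi / 2)
builder.cx([0], 1, math.pi)
builder.measure(0)
builder.measure(1)

assert builder.gates == [
    "z 0 3.141592653589793",
    "y 0 1.5707963267948966",
    "cx [0] 1 3.141592653589793",
    "measure 0",
    "measure 1",
]
```

Because the comparison is on exact strings, any change to gate ordering, qubit indices, or rotation angles in the parser shows up as a one-line difference in the test output.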